From d23b8c918fdcc0a84001ab2ed3e33d691a8403b0 Mon Sep 17 00:00:00 2001 From: Rodrigo Souza Date: Thu, 30 Sep 2021 20:35:20 -0700 Subject: [PATCH 01/10] Update README.md (#20992) --- sdk/cosmos/azure-cosmos/README.md | 40 +++++++++++++++---------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/sdk/cosmos/azure-cosmos/README.md b/sdk/cosmos/azure-cosmos/README.md index dd6f1428aa06..88a8232cdc0c 100644 --- a/sdk/cosmos/azure-cosmos/README.md +++ b/sdk/cosmos/azure-cosmos/README.md @@ -96,33 +96,33 @@ Currently the features below are **not supported**. For alternatives options, ch ### Data Plane Limitations: -* Group By queries (in roadmap for 2021). -* Language Native async i/o (in roadmap for 2021). -* Queries with COUNT from a DISTINCT subquery: SELECT COUNT (1) FROM (SELECT DISTINCT C.ID FROM C). -* Bulk/Transactional batch processing. -* Direct TCP Mode access. -* Continuation token for cross partitions queries. -* Change Feed: Processor. -* Change Feed: Read multiple partitions key values. -* Change Feed: Read specific time. -* Change Feed: Read from the beggining. -* Change Feed: Pull model. -* Cross-partition ORDER BY for mixed types. +* Group By queries +* Language Native async i/o +* Queries with COUNT from a DISTINCT subquery: SELECT COUNT (1) FROM (SELECT DISTINCT C.ID FROM C) +* Bulk/Transactional batch processing +* Direct TCP Mode access +* Continuation token for cross partitions queries +* Change Feed: Processor +* Change Feed: Read multiple partitions key values +* Change Feed: Read specific time +* Change Feed: Read from the beggining +* Change Feed: Pull model +* Cross-partition ORDER BY for mixed types * Integrated Cache using the default consistency level, that is "Session". To take advantage of the new [Cosmos DB Integrated Cache](https://docs.microsoft.com/azure/cosmos-db/integrated-cache), it is required to explicitly set CosmosClient consistency level to "Eventual": `consistency_level= Eventual`. 
### Control Plane Limitations: -* Get CollectionSizeUsage, DatabaseUsage, and DocumentUsage metrics. -* Create Geospatial Index. -* Provision Autoscale DBs or containers. -* Update Autoscale throughput. -* Update analytical store ttl (time to live). -* Get the connection string. -* Get the minimum RU/s of a container. +* Get CollectionSizeUsage, DatabaseUsage, and DocumentUsage metrics +* Create Geospatial Index +* Provision Autoscale DBs or containers +* Update Autoscale throughput +* Update analytical store ttl (time to live) +* Get the connection string +* Get the minimum RU/s of a container ### Security Limitations: -* AAD support. +* AAD support ## Workarounds From 969195d94a0c20f2cb024ce82d4d79de054e4595 Mon Sep 17 00:00:00 2001 From: Krista Pratico Date: Fri, 1 Oct 2021 09:06:04 -0700 Subject: [PATCH 02/10] add prebuilt document to readme examples + print styles (#20996) --- .../azure-ai-formrecognizer/README.md | 78 +++++++++++++++++++ .../sample_analyze_prebuilt_document_async.py | 10 +-- .../sample_analyze_prebuilt_document.py | 10 +-- 3 files changed, 86 insertions(+), 12 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/README.md index 84d8dc5b62eb..a66d8ea808e3 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/README.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/README.md @@ -191,6 +191,7 @@ The following section provides several code snippets covering some of the most c * [Extract layout](#extract-layout "Extract Layout") * [Using Prebuilt Models](#using-prebuilt-models "Using Prebuilt Models") +* [Using Prebuilt Document](#using-prebuilt-document "Using Prebuilt Document") * [Build a Model](#build-a-model "Build a model") * [Analyze Documents Using a Custom Model](#analyze-documents-using-a-custom-model "Analyze Documents Using a Custom Model") * [Manage Your Models](#manage-your-models "Manage Your Models") @@ -310,6 +311,83 @@ You are not limited to receipts! 
There are a few prebuilt models to choose from, - Analyze invoices using the `prebuilt-invoice` model (fields recognized by the service can be found [here][service_recognize_invoice]). - Analyze identity documents using the `prebuilt-idDocuments` model (fields recognized by the service can be found [here][service_recognize_identity_documents]). +### Using Prebuilt Document +Analyze entities, key-value pairs, tables, styles, and selection marks from documents using the general prebuilt document model provided by the Form Recognizer service. +Select the Prebuilt Document model by passing `model="prebuilt-document"` into the `begin_analyze_documents` method: + +```python +from azure.ai.formrecognizer import DocumentAnalysisClient +from azure.core.credentials import AzureKeyCredential + +endpoint = "https://.cognitiveservices.azure.com/" +credential = AzureKeyCredential("") + +document_analysis_client = DocumentAnalysisClient(endpoint, credential) + +with open("", "rb") as fd: + document = fd.read() + +poller = document_analysis_client.begin_analyze_document("prebuilt-document", document) +result = poller.result() + +print("----Entities found in document----") +for entity in result.entities: + print("Entity '{}' has category '{}' with sub-category '{}'".format( + entity.content, entity.category, entity.sub_category + )) + print("...with confidence {}\n".format(entity.confidence)) + +print("----Key-value pairs found in document----") +for kv_pair in result.key_value_pairs: + if kv_pair.key: + print( + "Key '{}' found within '{}' bounding regions".format( + kv_pair.key.content, + kv_pair.key.bounding_regions, + ) + ) + if kv_pair.value: + print( + "Value '{}' found within '{}' bounding regions\n".format( + kv_pair.value.content, + kv_pair.value.bounding_regions, + ) + ) + +print("----Tables found in document----") +for table_idx, table in enumerate(result.tables): + print( + "Table # {} has {} rows and {} columns".format( + table_idx, table.row_count, table.column_count 
+ ) + ) + for region in table.bounding_regions: + print( + "Table # {} location on page: {} is {}".format( + table_idx, + region.page_number, + region.bounding_box, + ) + ) + +print("----Styles found in document----") +for style in result.styles: + if style.is_handwritten: + print("Document contains handwritten content: ") + print(",".join([result.content[span.offset:span.offset + span.length] for span in style.spans])) + +print("----Selection marks found in document----") +for page in result.pages: + for selection_mark in page.selection_marks: + print( + "...Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format( + selection_mark.state, + selection_mark.bounding_box, + selection_mark.confidence, + ) + ) +``` + ### Build a model Build a custom model on your own document type. The resulting model can be used to analyze values from the types of documents it was trained on. Provide a container SAS URL to your Azure Storage Blob container where you're storing the training documents. 
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py index cd77e07fab21..a6379520b2eb 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py @@ -63,12 +63,10 @@ async def analyze_document(): ) result = await poller.result() - for idx, style in enumerate(result.styles): - print( - "Document contains {} content".format( - "handwritten" if style.is_handwritten else "no handwritten" - ) - ) + for style in result.styles: + if style.is_handwritten: + print("Document contains handwritten content: ") + print(",".join([result.content[span.offset:span.offset + span.length] for span in style.spans])) for idx, page in enumerate(result.pages): print("----Analyzing document from page #{}----".format(idx + 1)) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py index 47141b26be86..e7ab3f134fab 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py @@ -59,12 +59,10 @@ def analyze_document(): ) result = poller.result() - for idx, style in enumerate(result.styles): - print( - "Document contains {} content".format( - "handwritten" if style.is_handwritten else "no handwritten" - ) - ) + for style in result.styles: + if style.is_handwritten: + print("Document contains handwritten content: ") + print(",".join([result.content[span.offset:span.offset + span.length] for span in style.spans])) 
for page in result.pages: print("----Analyzing document from page #{}----".format(page.page_number)) From 0481e9dd626899d07f6b2d8b082ae1f2997ec311 Mon Sep 17 00:00:00 2001 From: AlonsoMondal <80124074+AlonsoMondal@users.noreply.github.com> Date: Fri, 1 Oct 2021 11:34:27 -0600 Subject: [PATCH 03/10] updating codeowners for azure communication services SMS & PhoneNumbers (#20942) * updating codeowners for azure communication services SMS & PhoneNumbers * adding '/' to directories --- .github/CODEOWNERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 784855f89a95..6dc495c82d27 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -39,6 +39,8 @@ # PRLabel: %Communication /sdk/communication/ @acsdevx-msft +/sdk/communication/azure-communication-phonenumbers/ @RoyHerrod @danielav7 @whisper6284 @AlonsoMondal +/sdk/communication/azure-communication-sms/ @RoyHerrod @arifibrahim4 # PRLabel: %KeyVault /sdk/keyvault/ @schaabs @chlowell @mccoyp @YalinLi0312 From 08b17d864efe593d65b44ed46fb9ee55350f3729 Mon Sep 17 00:00:00 2001 From: Travis Prescott Date: Fri, 1 Oct 2021 13:04:30 -0500 Subject: [PATCH 04/10] [Search] Regenerate Search SDK (#20977) * Update README and setup.py for Python 2.7 and 3.10 changes. * Regenerate with latest Autorest. * Search-specific updates. * Expose generated changes. * Reflect latest swagger updates. * Expose kwargs via custom client. * Update changelog. 
--- .../azure-search-documents/CHANGELOG.md | 11 +- sdk/search/azure-search-documents/README.md | 3 + .../search/documents/_generated/_vendor.py | 27 + .../aio/operations/_documents_operations.py | 46 +- .../documents/_generated/models/__init__.py | 6 + .../documents/_generated/models/_models.py | 887 ++- .../_generated/models/_models_py3.py | 893 ++- .../_generated/models/_search_client_enums.py | 52 +- .../operations/_documents_operations.py | 47 +- .../azure/search/documents/_search_client.py | 6 +- .../documents/aio/_search_client_async.py | 6 +- .../documents/indexes/_generated/_vendor.py | 27 + .../operations/_data_sources_operations.py | 34 +- .../aio/operations/_indexers_operations.py | 61 +- .../aio/operations/_indexes_operations.py | 39 +- .../operations/_search_client_operations.py | 6 +- .../aio/operations/_skillsets_operations.py | 100 +- .../operations/_synonym_maps_operations.py | 26 +- .../indexes/_generated/models/__init__.py | 9 +- .../indexes/_generated/models/_models.py | 5280 +++++++++++----- .../indexes/_generated/models/_models_py3.py | 5288 ++++++++++++----- .../operations/_data_sources_operations.py | 41 +- .../operations/_indexers_operations.py | 68 +- .../operations/_indexes_operations.py | 40 +- .../operations/_search_client_operations.py | 6 +- .../operations/_skillsets_operations.py | 145 +- .../operations/_synonym_maps_operations.py | 27 +- .../indexes/_search_indexer_client.py | 13 + .../indexes/aio/_search_indexer_client.py | 12 + .../azure/search/documents/models/__init__.py | 12 +- .../sample_semantic_search_async.py | 2 +- .../samples/sample_semantic_search.py | 2 +- sdk/search/azure-search-documents/setup.py | 1 + 33 files changed, 9302 insertions(+), 3921 deletions(-) create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py diff --git a/sdk/search/azure-search-documents/CHANGELOG.md 
b/sdk/search/azure-search-documents/CHANGELOG.md index c5911ab8e9a2..2e1ad6ff973d 100644 --- a/sdk/search/azure-search-documents/CHANGELOG.md +++ b/sdk/search/azure-search-documents/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 11.3.0b4 (Unreleased) +## 11.3.0b4 (2021-10-05) ### Features Added @@ -10,19 +10,18 @@ ### Breaking Changes - Renamed `SearchClient.speller` to `SearchClient.query_speller`. +- Renamed model `Speller` to `QuerySpellerType`. +- Renamed model `Answers` to `QueryAnswerType`. - Removed keyword arguments from `SearchClient`: `answers` and `captions`. - `SentimentSkill`, `EntityRecognitionSkill`: added client-side validation to prevent sending unsupported parameters. - -### Bugs Fixed - -### Other Changes +- Renamed property `ignore_reset_requirements` to `skip_indexer_reset_requirement_for_cache`. ## 11.3.0b3 (2021-09-08) ### Features Added - Added new models: - - `azure.search.documents.models.Captions` + - `azure.search.documents.models.QueryCaptionType` - `azure.search.documents.models.CaptionResult` - `azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage` - `azure.search.documents.indexes.models.EntityRecognitionSkillVersion` diff --git a/sdk/search/azure-search-documents/README.md b/sdk/search/azure-search-documents/README.md index ecda3962f4e2..abc438a29800 100644 --- a/sdk/search/azure-search-documents/README.md +++ b/sdk/search/azure-search-documents/README.md @@ -40,6 +40,9 @@ Use the Azure.Search.Documents client library to: [Product documentation](https://docs.microsoft.com/azure/search/search-what-is-azure-search) | [Samples](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/search/azure-search-documents/samples) +## _Disclaimer_ + +_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. 
For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_ ## Getting started diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py new file mode 100644 index 000000000000..138f663c53a4 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py @@ -0,0 +1,27 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.pipeline.transport import HttpRequest + +def _convert_request(request, files=None): + data = request.content if not files else None + request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) + if files: + request.set_formdata_body(files) + return request + +def _format_url_section(template, **kwargs): + components = template.split("/") + while components: + try: + return template.format(**kwargs) + except KeyError as key: + formatted_components = template.split("/") + components = [ + c for c in formatted_components if "{}".format(key.args[0]) not in c + ] + template = "/".join(components) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py index bd89a2bfee90..f80a00395d23 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py @@ -16,6 +16,7 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ... import models as _models +from ..._vendor import _convert_request from ...operations._documents_operations import build_autocomplete_get_request, build_autocomplete_post_request, build_count_request, build_get_request, build_index_request, build_search_get_request, build_search_post_request, build_suggest_get_request, build_suggest_post_request T = TypeVar('T') @@ -71,7 +72,8 @@ async def count( request = build_count_request( x_ms_client_request_id=_x_ms_client_request_id, template_url=self.count.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -83,7 +85,7 @@ async def count( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('long', pipeline_response) @@ -202,7 +204,8 @@ async def search_get( semantic_fields=_semantic_fields, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.search_get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -214,7 +217,7 @@ async def search_get( if 
response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchDocumentsResult', pipeline_response) @@ -263,7 +266,8 @@ async def search_post( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.search_post.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -275,7 +279,7 @@ async def search_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchDocumentsResult', pipeline_response) @@ -325,7 +329,8 @@ async def get( selected_fields=selected_fields, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -337,7 +342,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('object', pipeline_response) @@ -419,7 +424,8 @@ async def suggest_get( top=_top, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.suggest_get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -431,7 +437,7 @@ async def suggest_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response) @@ -480,7 +486,8 @@ async def suggest_post( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.suggest_post.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -492,7 +499,7 @@ async def suggest_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise 
HttpResponseError(response=response, model=error) deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response) @@ -542,7 +549,8 @@ async def index( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.index.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -554,7 +562,7 @@ async def index( if response.status_code not in [200, 207]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -636,7 +644,8 @@ async def autocomplete_get( search_fields=_search_fields, top=_top, template_url=self.autocomplete_get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -648,7 +657,7 @@ async def autocomplete_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AutocompleteResult', pipeline_response) @@ -697,7 +706,8 @@ async def autocomplete_post( x_ms_client_request_id=_x_ms_client_request_id, 
json=json, template_url=self.autocomplete_post.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -709,7 +719,7 @@ async def autocomplete_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AutocompleteResult', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py index cec096d25420..014f69d11880 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py @@ -56,7 +56,10 @@ AutocompleteMode, Captions, IndexActionType, + QueryAnswerType, + QueryCaptionType, QueryLanguage, + QuerySpellerType, QueryType, ScoringStatistics, SearchMode, @@ -89,7 +92,10 @@ 'AutocompleteMode', 'Captions', 'IndexActionType', + 'QueryAnswerType', + 'QueryCaptionType', 'QueryLanguage', + 'QuerySpellerType', 'QueryType', 'ScoringStatistics', 'SearchMode', diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py index 7993e91f7cfe..735efd768921 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -15,9 +15,9 @@ class AnswerResult(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar score: The score value represents how relevant the answer is to the the query relative to other answers returned for the query. :vartype score: float @@ -49,6 +49,11 @@ def __init__( self, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(AnswerResult, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.score = None @@ -84,6 +89,8 @@ def __init__( self, **kwargs ): + """ + """ super(AutocompleteItem, self).__init__(**kwargs) self.text = None self.query_plus_text = None @@ -92,36 +99,36 @@ def __init__( class AutocompleteOptions(msrest.serialization.Model): """Parameter group. - :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". 
- :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword filter: An OData expression that filters the documents used to produce completed terms + :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :ivar filter: An OData expression that filters the documents used to produce completed terms for the Autocomplete result. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - autocomplete query. Default is false. When set to true, the query will find terms even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete + query. Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and + consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. 
- :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by an autocomplete query in order for the query to be reported as a - success. This parameter can be useful for ensuring search availability even for services with - only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword search_fields: The list of field names to consider when querying for auto-completed + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by an autocomplete query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :vartype minimum_coverage: float + :ivar search_fields: The list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. - :paramtype search_fields: list[str] - :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 - and 100. The default is 5. - :paramtype top: int + :vartype search_fields: list[str] + :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and + 100. The default is 5. + :vartype top: int """ _attribute_map = { @@ -139,6 +146,38 @@ def __init__( self, **kwargs ): + """ + :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing + auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". + :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :keyword filter: An OData expression that filters the documents used to produce completed terms + for the Autocomplete result. 
+ :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + autocomplete query. Default is false. When set to true, the query will find terms even if + there's a substituted or missing character in the search text. While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by an autocomplete query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for services with + only one replica. The default is 80. + :paramtype minimum_coverage: float + :keyword search_fields: The list of field names to consider when querying for auto-completed + terms. Target fields must be included in the specified suggester. + :paramtype search_fields: list[str] + :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. The default is 5. + :paramtype top: int + """ super(AutocompleteOptions, self).__init__(**kwargs) self.autocomplete_mode = kwargs.get('autocomplete_mode', None) self.filter = kwargs.get('filter', None) @@ -155,41 +194,41 @@ class AutocompleteRequest(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword search_text: Required. The search text on which to base autocomplete results. 
- :paramtype search_text: str - :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + :ivar search_text: Required. The search text on which to base autocomplete results. + :vartype search_text: str + :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". - :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword filter: An OData expression that filters the documents used to produce completed terms + :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :ivar filter: An OData expression that filters the documents used to produce completed terms for the Autocomplete result. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - autocomplete query. Default is false. When set to true, the query will autocomplete terms even - if there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete + query. Default is false. When set to true, the query will autocomplete terms even if there's a + substituted or missing character in the search text. While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and + consume more resources. 
+ :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by an autocomplete query in order for the query to be reported as a - success. This parameter can be useful for ensuring search availability even for services with - only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword search_fields: The comma-separated list of field names to consider when querying for + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by an autocomplete query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :vartype minimum_coverage: float + :ivar search_fields: The comma-separated list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. - :paramtype search_fields: str - :keyword suggester_name: Required. The name of the suggester as specified in the suggesters + :vartype search_fields: str + :ivar suggester_name: Required. The name of the suggester as specified in the suggesters collection that's part of the index definition. - :paramtype suggester_name: str - :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 - and 100. 
The default is 5. - :paramtype top: int + :vartype suggester_name: str + :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and + 100. The default is 5. + :vartype top: int """ _validation = { @@ -214,6 +253,43 @@ def __init__( self, **kwargs ): + """ + :keyword search_text: Required. The search text on which to base autocomplete results. + :paramtype search_text: str + :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing + auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". + :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :keyword filter: An OData expression that filters the documents used to produce completed terms + for the Autocomplete result. + :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + autocomplete query. Default is false. When set to true, the query will autocomplete terms even + if there's a substituted or missing character in the search text. While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by an autocomplete query in order for the query to be reported as a + success. 
This parameter can be useful for ensuring search availability even for services with + only one replica. The default is 80. + :paramtype minimum_coverage: float + :keyword search_fields: The comma-separated list of field names to consider when querying for + auto-completed terms. Target fields must be included in the specified suggester. + :paramtype search_fields: str + :keyword suggester_name: Required. The name of the suggester as specified in the suggesters + collection that's part of the index definition. + :paramtype suggester_name: str + :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. The default is 5. + :paramtype top: int + """ super(AutocompleteRequest, self).__init__(**kwargs) self.search_text = kwargs['search_text'] self.autocomplete_mode = kwargs.get('autocomplete_mode', None) @@ -255,6 +331,8 @@ def __init__( self, **kwargs ): + """ + """ super(AutocompleteResult, self).__init__(**kwargs) self.coverage = None self.results = None @@ -265,9 +343,9 @@ class CaptionResult(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar text: A representative text passage extracted from the document most relevant to the search query. :vartype text: str @@ -291,6 +369,11 @@ def __init__( self, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. 
+ :paramtype additional_properties: dict[str, any] + """ super(CaptionResult, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.text = None @@ -302,9 +385,9 @@ class FacetResult(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar count: The approximate count of documents falling within the bucket described by this facet. :vartype count: long @@ -323,6 +406,11 @@ def __init__( self, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(FacetResult, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.count = None @@ -331,12 +419,12 @@ def __init__( class IndexAction(msrest.serialization.Model): """Represents an index action that operates on a document. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] - :keyword action_type: The operation to perform on a document in an indexing batch. Possible - values include: "upload", "merge", "mergeOrUpload", "delete". - :paramtype action_type: str or ~azure.search.documents.models.IndexActionType + :vartype additional_properties: dict[str, any] + :ivar action_type: The operation to perform on a document in an indexing batch. Possible values + include: "upload", "merge", "mergeOrUpload", "delete". 
+ :vartype action_type: str or ~azure.search.documents.models.IndexActionType """ _attribute_map = { @@ -348,6 +436,14 @@ def __init__( self, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword action_type: The operation to perform on a document in an indexing batch. Possible + values include: "upload", "merge", "mergeOrUpload", "delete". + :paramtype action_type: str or ~azure.search.documents.models.IndexActionType + """ super(IndexAction, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.action_type = kwargs.get('action_type', None) @@ -358,8 +454,8 @@ class IndexBatch(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword actions: Required. The actions in the batch. - :paramtype actions: list[~azure.search.documents.models.IndexAction] + :ivar actions: Required. The actions in the batch. + :vartype actions: list[~azure.search.documents.models.IndexAction] """ _validation = { @@ -374,6 +470,10 @@ def __init__( self, **kwargs ): + """ + :keyword actions: Required. The actions in the batch. + :paramtype actions: list[~azure.search.documents.models.IndexAction] + """ super(IndexBatch, self).__init__(**kwargs) self.actions = kwargs['actions'] @@ -402,6 +502,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexDocumentsResult, self).__init__(**kwargs) self.results = None @@ -446,6 +548,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexingResult, self).__init__(**kwargs) self.key = None self.error_message = None @@ -456,8 +560,8 @@ def __init__( class RequestOptions(msrest.serialization.Model): """Parameter group. - :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. 
- :paramtype x_ms_client_request_id: str + :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :vartype x_ms_client_request_id: str """ _attribute_map = { @@ -468,6 +572,10 @@ def __init__( self, **kwargs ): + """ + :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :paramtype x_ms_client_request_id: str + """ super(RequestOptions, self).__init__(**kwargs) self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None) @@ -531,6 +639,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchDocumentsResult, self).__init__(**kwargs) self.count = None self.coverage = None @@ -572,6 +682,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchError, self).__init__(**kwargs) self.code = None self.message = None @@ -581,98 +693,97 @@ def __init__( class SearchOptions(msrest.serialization.Model): """Parameter group. - :keyword include_total_result_count: A value that specifies whether to fetch the total count of + :ivar include_total_result_count: A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. - :paramtype include_total_result_count: bool - :keyword facets: The list of facet expressions to apply to the search query. Each facet - expression contains a field name, optionally followed by a comma-separated list of name:value - pairs. - :paramtype facets: list[str] - :keyword filter: The OData $filter expression to apply to the search query. - :paramtype filter: str - :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable + :vartype include_total_result_count: bool + :ivar facets: The list of facet expressions to apply to the search query. Each facet expression + contains a field name, optionally followed by a comma-separated list of name:value pairs. 
+ :vartype facets: list[str] + :ivar filter: The OData $filter expression to apply to the search query. + :vartype filter: str + :ivar highlight_fields: The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. - :paramtype highlight_fields: list[str] - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :vartype highlight_fields: list[str] + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a search query in order for the query to be reported as a success. This + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. - :paramtype minimum_coverage: float - :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + :vartype minimum_coverage: float + :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, and desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. 
If no OrderBy is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: list[str] - :keyword query_type: A value that specifies the syntax of the search query. The default is + :vartype order_by: list[str] + :ivar query_type: A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include: "simple", "full", "semantic". - :paramtype query_type: str or ~azure.search.documents.models.QueryType - :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + :vartype query_type: str or ~azure.search.documents.models.QueryType + :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). - :paramtype scoring_parameters: list[str] - :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + :vartype scoring_parameters: list[str] + :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. - :paramtype scoring_profile: str - :keyword search_fields: The list of field names to which to scope the full-text search. When - using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of - each fielded search expression take precedence over any field names listed in this parameter. - :paramtype search_fields: list[str] - :keyword query_language: The language of the query. Possible values include: "none", "en-us". 
- :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: Improve search recall by spell-correcting individual search query terms. + :vartype scoring_profile: str + :ivar search_fields: The list of field names to which to scope the full-text search. When using + fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each + fielded search expression take precedence over any field names listed in this parameter. + :vartype search_fields: list[str] + :ivar query_language: The language of the query. Possible values include: "none", "en-us". + :vartype query_language: str or ~azure.search.documents.models.QueryLanguage + :ivar speller: Improve search recall by spell-correcting individual search query terms. Possible values include: "none", "lexicon". - :paramtype speller: str or ~azure.search.documents.models.Speller - :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the - query returns answers extracted from key passages in the highest ranked documents. The number - of answers returned can be configured by appending the pipe character '|' followed by the + :vartype speller: str or ~azure.search.documents.models.Speller + :ivar answers: This parameter is only valid if the query type is 'semantic'. If set, the query + returns answers extracted from key passages in the highest ranked documents. The number of + answers returned can be configured by appending the pipe character '|' followed by the 'count-:code:``' option after the answers parameter value, such as 'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive". 
- :paramtype answers: str or ~azure.search.documents.models.Answers - :keyword search_mode: A value that specifies whether any or all of the search terms must be + :vartype answers: str or ~azure.search.documents.models.Answers + :ivar search_mode: A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Possible values include: "any", "all". - :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + :vartype search_mode: str or ~azure.search.documents.models.SearchMode + :ivar scoring_statistics: A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. Possible values include: "local", "global". - :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :keyword session_id: A value to be used to create a sticky session, which can help to get more + :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :ivar session_id: A value to be used to create a sticky session, which can help to get more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. - :paramtype session_id: str - :keyword select: The list of fields to retrieve. If unspecified, all fields marked as - retrievable in the schema are included. - :paramtype select: list[str] - :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. 
- If you need to scan documents in sequence, but cannot use $skip due to this limitation, - consider using $orderby on a totally-ordered key and $filter with a range query instead. - :paramtype skip: int - :keyword top: The number of search results to retrieve. This can be used in conjunction with - $skip to implement client-side paging of search results. If results are truncated due to - server-side paging, the response will include a continuation token that can be used to issue - another Search request for the next page of results. - :paramtype top: int - :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the - query returns captions extracted from key passages in the highest ranked documents. When - Captions is set to 'extractive', highlighting is enabled by default, and can be configured by - appending the pipe character '|' followed by the 'highlight-' option, such as + :vartype session_id: str + :ivar select: The list of fields to retrieve. If unspecified, all fields marked as retrievable + in the schema are included. + :vartype select: list[str] + :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If + you need to scan documents in sequence, but cannot use $skip due to this limitation, consider + using $orderby on a totally-ordered key and $filter with a range query instead. + :vartype skip: int + :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip + to implement client-side paging of search results. If results are truncated due to server-side + paging, the response will include a continuation token that can be used to issue another Search + request for the next page of results. + :vartype top: int + :ivar captions: This parameter is only valid if the query type is 'semantic'. If set, the query + returns captions extracted from key passages in the highest ranked documents. 
When Captions is + set to 'extractive', highlighting is enabled by default, and can be configured by appending the + pipe character '|' followed by the 'highlight-' option, such as 'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive". - :paramtype captions: str or ~azure.search.documents.models.Captions - :keyword semantic_fields: The list of field names used for semantic search. - :paramtype semantic_fields: list[str] + :vartype captions: str or ~azure.search.documents.models.Captions + :ivar semantic_fields: The list of field names used for semantic search. + :vartype semantic_fields: list[str] """ _attribute_map = { @@ -705,6 +816,100 @@ def __init__( self, **kwargs ): + """ + :keyword include_total_result_count: A value that specifies whether to fetch the total count of + results. Default is false. Setting this value to true may have a performance impact. Note that + the count returned is an approximation. + :paramtype include_total_result_count: bool + :keyword facets: The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list of name:value + pairs. + :paramtype facets: list[str] + :keyword filter: The OData $filter expression to apply to the search query. + :paramtype filter: str + :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable + fields can be used for hit highlighting. + :paramtype highlight_fields: list[str] + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>. 
+ :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a search query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one + replica. The default is 100. + :paramtype minimum_coverage: float + :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + expression can be either a field name or a call to either the geo.distance() or the + search.score() functions. Each expression can be followed by asc to indicate ascending, and + desc to indicate descending. The default is ascending order. Ties will be broken by the match + scores of documents. If no OrderBy is specified, the default sort order is descending by + document match score. There can be at most 32 $orderby clauses. + :paramtype order_by: list[str] + :keyword query_type: A value that specifies the syntax of the search query. The default is + 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include: + "simple", "full", "semantic". + :paramtype query_type: str or ~azure.search.documents.models.QueryType + :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + example, referencePointParameter) using the format name-values. For example, if the scoring + profile defines a function with a parameter called 'mylocation' the parameter string would be + "mylocation--122.2,44.8" (without the quotes). + :paramtype scoring_parameters: list[str] + :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + documents in order to sort the results. + :paramtype scoring_profile: str + :keyword search_fields: The list of field names to which to scope the full-text search. 
When + using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of + each fielded search expression take precedence over any field names listed in this parameter. + :paramtype search_fields: list[str] + :keyword query_language: The language of the query. Possible values include: "none", "en-us". + :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage + :keyword speller: Improve search recall by spell-correcting individual search query terms. + Possible values include: "none", "lexicon". + :paramtype speller: str or ~azure.search.documents.models.Speller + :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the + query returns answers extracted from key passages in the highest ranked documents. The number + of answers returned can be configured by appending the pipe character '|' followed by the + 'count-:code:``' option after the answers parameter value, such as + 'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive". + :paramtype answers: str or ~azure.search.documents.models.Answers + :keyword search_mode: A value that specifies whether any or all of the search terms must be + matched in order to count the document as a match. Possible values include: "any", "all". + :paramtype search_mode: str or ~azure.search.documents.models.SearchMode + :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + statistics (such as document frequency) globally for more consistent scoring, or locally, for + lower latency. Possible values include: "local", "global". + :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :keyword session_id: A value to be used to create a sticky session, which can help to get more + consistent results. As long as the same sessionId is used, a best-effort attempt will be made + to target the same replica set. 
Be wary that reusing the same sessionID values repeatedly can + interfere with the load balancing of the requests across replicas and adversely affect the + performance of the search service. The value used as sessionId cannot start with a '_' + character. + :paramtype session_id: str + :keyword select: The list of fields to retrieve. If unspecified, all fields marked as + retrievable in the schema are included. + :paramtype select: list[str] + :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. + If you need to scan documents in sequence, but cannot use $skip due to this limitation, + consider using $orderby on a totally-ordered key and $filter with a range query instead. + :paramtype skip: int + :keyword top: The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are truncated due to + server-side paging, the response will include a continuation token that can be used to issue + another Search request for the next page of results. + :paramtype top: int + :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the + query returns captions extracted from key passages in the highest ranked documents. When + Captions is set to 'extractive', highlighting is enabled by default, and can be configured by + appending the pipe character '|' followed by the 'highlight-' option, such as + 'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive". + :paramtype captions: str or ~azure.search.documents.models.Captions + :keyword semantic_fields: The list of field names used for semantic search. 
+ :paramtype semantic_fields: list[str] + """ super(SearchOptions, self).__init__(**kwargs) self.include_total_result_count = kwargs.get('include_total_result_count', None) self.facets = kwargs.get('facets', None) @@ -734,99 +939,98 @@ def __init__( class SearchRequest(msrest.serialization.Model): """Parameters for filtering, sorting, faceting, paging, and other search query behaviors. - :keyword include_total_result_count: A value that specifies whether to fetch the total count of + :ivar include_total_result_count: A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. - :paramtype include_total_result_count: bool - :keyword facets: The list of facet expressions to apply to the search query. Each facet - expression contains a field name, optionally followed by a comma-separated list of name:value - pairs. - :paramtype facets: list[str] - :keyword filter: The OData $filter expression to apply to the search query. - :paramtype filter: str - :keyword highlight_fields: The comma-separated list of field names to use for hit highlights. - Only searchable fields can be used for hit highlighting. - :paramtype highlight_fields: str - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :vartype include_total_result_count: bool + :ivar facets: The list of facet expressions to apply to the search query. Each facet expression + contains a field name, optionally followed by a comma-separated list of name:value pairs. + :vartype facets: list[str] + :ivar filter: The OData $filter expression to apply to the search query. + :vartype filter: str + :ivar highlight_fields: The comma-separated list of field names to use for hit highlights. Only + searchable fields can be used for hit highlighting. 
+ :vartype highlight_fields: str + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a search query in order for the query to be reported as a success. This + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. - :paramtype minimum_coverage: float - :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + :vartype minimum_coverage: float + :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: str - :keyword query_type: A value that specifies the syntax of the search query. The default is + :vartype order_by: str + :ivar query_type: A value that specifies the syntax of the search query. The default is 'simple'. 
Use 'full' if your query uses the Lucene query syntax. Possible values include: "simple", "full", "semantic". - :paramtype query_type: str or ~azure.search.documents.models.QueryType - :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + :vartype query_type: str or ~azure.search.documents.models.QueryType + :ivar scoring_statistics: A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. Possible values include: "local", "global". - :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :keyword session_id: A value to be used to create a sticky session, which can help getting more + :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :ivar session_id: A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. - :paramtype session_id: str - :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + :vartype session_id: str + :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). 
- :paramtype scoring_parameters: list[str] - :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + :vartype scoring_parameters: list[str] + :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. - :paramtype scoring_profile: str - :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to - match all documents. - :paramtype search_text: str - :keyword search_fields: The comma-separated list of field names to which to scope the full-text + :vartype scoring_profile: str + :ivar search_text: A full-text search query expression; Use "*" or omit this parameter to match + all documents. + :vartype search_text: str + :ivar search_fields: The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. - :paramtype search_fields: str - :keyword search_mode: A value that specifies whether any or all of the search terms must be + :vartype search_fields: str + :ivar search_mode: A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Possible values include: "any", "all". - :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword query_language: A value that specifies the language of the search query. Possible - values include: "none", "en-us". - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: A value that specified the type of the speller to use to spell-correct + :vartype search_mode: str or ~azure.search.documents.models.SearchMode + :ivar query_language: A value that specifies the language of the search query. Possible values + include: "none", "en-us". 
+ :vartype query_language: str or ~azure.search.documents.models.QueryLanguage + :ivar speller: A value that specified the type of the speller to use to spell-correct individual search query terms. Possible values include: "none", "lexicon". - :paramtype speller: str or ~azure.search.documents.models.Speller - :keyword answers: A value that specifies whether answers should be returned as part of the - search response. Possible values include: "none", "extractive". - :paramtype answers: str or ~azure.search.documents.models.Answers - :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields - marked as retrievable in the schema are included. - :paramtype select: str - :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. - If you need to scan documents in sequence, but cannot use skip due to this limitation, consider + :vartype speller: str or ~azure.search.documents.models.QuerySpellerType + :ivar answers: A value that specifies whether answers should be returned as part of the search + response. Possible values include: "none", "extractive". + :vartype answers: str or ~azure.search.documents.models.QueryAnswerType + :ivar select: The comma-separated list of fields to retrieve. If unspecified, all fields marked + as retrievable in the schema are included. + :vartype select: str + :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If + you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. - :paramtype skip: int - :keyword top: The number of search results to retrieve. This can be used in conjunction with - $skip to implement client-side paging of search results. 
If results are truncated due to - server-side paging, the response will include a continuation token that can be used to issue - another Search request for the next page of results. - :paramtype top: int - :keyword captions: A value that specifies whether captions should be returned as part of the + :vartype skip: int + :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip + to implement client-side paging of search results. If results are truncated due to server-side + paging, the response will include a continuation token that can be used to issue another Search + request for the next page of results. + :vartype top: int + :ivar captions: A value that specifies whether captions should be returned as part of the search response. Possible values include: "none", "extractive". - :paramtype captions: str or ~azure.search.documents.models.Captions - :keyword semantic_fields: The comma-separated list of field names used for semantic search. - :paramtype semantic_fields: str + :vartype captions: str or ~azure.search.documents.models.QueryCaptionType + :ivar semantic_fields: The comma-separated list of field names used for semantic search. + :vartype semantic_fields: str """ _attribute_map = { @@ -860,6 +1064,101 @@ def __init__( self, **kwargs ): + """ + :keyword include_total_result_count: A value that specifies whether to fetch the total count of + results. Default is false. Setting this value to true may have a performance impact. Note that + the count returned is an approximation. + :paramtype include_total_result_count: bool + :keyword facets: The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list of name:value + pairs. + :paramtype facets: list[str] + :keyword filter: The OData $filter expression to apply to the search query. 
+ :paramtype filter: str + :keyword highlight_fields: The comma-separated list of field names to use for hit highlights. + Only searchable fields can be used for hit highlighting. + :paramtype highlight_fields: str + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a search query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one + replica. The default is 100. + :paramtype minimum_coverage: float + :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the geo.distance() or + the search.score() functions. Each expression can be followed by asc to indicate ascending, or + desc to indicate descending. The default is ascending order. Ties will be broken by the match + scores of documents. If no $orderby is specified, the default sort order is descending by + document match score. There can be at most 32 $orderby clauses. + :paramtype order_by: str + :keyword query_type: A value that specifies the syntax of the search query. The default is + 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include: + "simple", "full", "semantic". + :paramtype query_type: str or ~azure.search.documents.models.QueryType + :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + statistics (such as document frequency) globally for more consistent scoring, or locally, for + lower latency. The default is 'local'. 
Use 'global' to aggregate scoring statistics globally + before scoring. Using global scoring statistics can increase latency of search queries. + Possible values include: "local", "global". + :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :keyword session_id: A value to be used to create a sticky session, which can help getting more + consistent results. As long as the same sessionId is used, a best-effort attempt will be made + to target the same replica set. Be wary that reusing the same sessionID values repeatedly can + interfere with the load balancing of the requests across replicas and adversely affect the + performance of the search service. The value used as sessionId cannot start with a '_' + character. + :paramtype session_id: str + :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + example, referencePointParameter) using the format name-values. For example, if the scoring + profile defines a function with a parameter called 'mylocation' the parameter string would be + "mylocation--122.2,44.8" (without the quotes). + :paramtype scoring_parameters: list[str] + :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + documents in order to sort the results. + :paramtype scoring_profile: str + :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to + match all documents. + :paramtype search_text: str + :keyword search_fields: The comma-separated list of field names to which to scope the full-text + search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the + field names of each fielded search expression take precedence over any field names listed in + this parameter. + :paramtype search_fields: str + :keyword search_mode: A value that specifies whether any or all of the search terms must be + matched in order to count the document as a match. 
+ Possible values include: "any", "all". + :paramtype search_mode: str or ~azure.search.documents.models.SearchMode + :keyword query_language: A value that specifies the language of the search query. Possible + values include: "none", "en-us". + :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage + :keyword speller: A value that specifies the type of the speller to use to spell-correct + individual search query terms. Possible values include: "none", "lexicon". + :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType + :keyword answers: A value that specifies whether answers should be returned as part of the + search response. Possible values include: "none", "extractive". + :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType + :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields + marked as retrievable in the schema are included. + :paramtype select: str + :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. + If you need to scan documents in sequence, but cannot use skip due to this limitation, consider + using orderby on a totally-ordered key and filter with a range query instead. + :paramtype skip: int + :keyword top: The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are truncated due to + server-side paging, the response will include a continuation token that can be used to issue + another Search request for the next page of results. + :paramtype top: int + :keyword captions: A value that specifies whether captions should be returned as part of the + search response. Possible values include: "none", "extractive". + :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType + :keyword semantic_fields: The comma-separated list of field names used for semantic search. 
+ :paramtype semantic_fields: str + """ super(SearchRequest, self).__init__(**kwargs) self.include_total_result_count = kwargs.get('include_total_result_count', None) self.facets = kwargs.get('facets', None) @@ -894,9 +1193,9 @@ class SearchResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar score: Required. The relevance score of the document compared to other documents returned by the query. :vartype score: float @@ -932,6 +1231,11 @@ def __init__( self, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(SearchResult, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.score = None @@ -968,6 +1272,8 @@ def __init__( self, **kwargs ): + """ + """ super(SuggestDocumentsResult, self).__init__(**kwargs) self.results = None self.coverage = None @@ -976,41 +1282,41 @@ def __init__( class SuggestOptions(msrest.serialization.Model): """Parameter group. - :keyword filter: An OData expression that filters the documents considered for suggestions. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - suggestions query. Default is false. When set to true, the query will find terms even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are - slower and consume more resources. 
- :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :ivar filter: An OData expression that filters the documents considered for suggestions. + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions + query. Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and + consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a suggestions query in order for the query to be reported as a success. - This parameter can be useful for ensuring search availability even for services with only one + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a suggestions query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword order_by: The list of OData $orderby expressions by which to sort the results. 
Each + :vartype minimum_coverage: float + :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: list[str] - :keyword search_fields: The list of field names to search for the specified search text. Target + :vartype order_by: list[str] + :ivar search_fields: The list of field names to search for the specified search text. Target fields must be included in the specified suggester. - :paramtype search_fields: list[str] - :keyword select: The list of fields to retrieve. If unspecified, only the key field will be + :vartype search_fields: list[str] + :ivar select: The list of fields to retrieve. If unspecified, only the key field will be included in the results. - :paramtype select: list[str] - :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and - 100. The default is 5. - :paramtype top: int + :vartype select: list[str] + :ivar top: The number of suggestions to retrieve. The value must be a number between 1 and 100. + The default is 5. + :vartype top: int """ _attribute_map = { @@ -1029,6 +1335,43 @@ def __init__( self, **kwargs ): + """ + :keyword filter: An OData expression that filters the documents considered for suggestions. + :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + suggestions query. Default is false. When set to true, the query will find terms even if + there's a substituted or missing character in the search text. 
While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting of suggestions is disabled. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting of suggestions is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a suggestions query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :paramtype minimum_coverage: float + :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + expression can be either a field name or a call to either the geo.distance() or the + search.score() functions. Each expression can be followed by asc to indicate ascending, or desc + to indicate descending. The default is ascending order. Ties will be broken by the match scores + of documents. If no $orderby is specified, the default sort order is descending by document + match score. There can be at most 32 $orderby clauses. + :paramtype order_by: list[str] + :keyword search_fields: The list of field names to search for the specified search text. Target + fields must be included in the specified suggester. + :paramtype search_fields: list[str] + :keyword select: The list of fields to retrieve. If unspecified, only the key field will be + included in the results. + :paramtype select: list[str] + :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and + 100. The default is 5. 
+ :paramtype top: int + """ super(SuggestOptions, self).__init__(**kwargs) self.filter = kwargs.get('filter', None) self.use_fuzzy_matching = kwargs.get('use_fuzzy_matching', None) @@ -1046,47 +1389,47 @@ class SuggestRequest(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword filter: An OData expression that filters the documents considered for suggestions. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - suggestion query. Default is false. When set to true, the query will find suggestions even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :ivar filter: An OData expression that filters the documents considered for suggestions. + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestion + query. Default is false. When set to true, the query will find suggestions even if there's a + substituted or missing character in the search text. While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and + consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. 
If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a suggestion query in order for the query to be reported as a success. - This parameter can be useful for ensuring search availability even for services with only one + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a suggestion query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + :vartype minimum_coverage: float + :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: str - :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1 + :vartype order_by: str + :ivar search_text: Required. The search text to use to suggest documents. Must be at least 1 character, and no more than 100 characters. - :paramtype search_text: str - :keyword search_fields: The comma-separated list of field names to search for the specified - search text. Target fields must be included in the specified suggester. 
- :paramtype search_fields: str - :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key + :vartype search_text: str + :ivar search_fields: The comma-separated list of field names to search for the specified search + text. Target fields must be included in the specified suggester. + :vartype search_fields: str + :ivar select: The comma-separated list of fields to retrieve. If unspecified, only the key field will be included in the results. - :paramtype select: str - :keyword suggester_name: Required. The name of the suggester as specified in the suggesters + :vartype select: str + :ivar suggester_name: Required. The name of the suggester as specified in the suggesters collection that's part of the index definition. - :paramtype suggester_name: str - :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100. - The default is 5. - :paramtype top: int + :vartype suggester_name: str + :ivar top: The number of suggestions to retrieve. This must be a value between 1 and 100. The + default is 5. + :vartype top: int """ _validation = { @@ -1112,6 +1455,49 @@ def __init__( self, **kwargs ): + """ + :keyword filter: An OData expression that filters the documents considered for suggestions. + :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + suggestion query. Default is false. When set to true, the query will find suggestions even if + there's a substituted or missing character in the search text. While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting of suggestions is disabled. 
+ :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting of suggestions is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a suggestion query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :paramtype minimum_coverage: float + :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the geo.distance() or + the search.score() functions. Each expression can be followed by asc to indicate ascending, or + desc to indicate descending. The default is ascending order. Ties will be broken by the match + scores of documents. If no $orderby is specified, the default sort order is descending by + document match score. There can be at most 32 $orderby clauses. + :paramtype order_by: str + :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1 + character, and no more than 100 characters. + :paramtype search_text: str + :keyword search_fields: The comma-separated list of field names to search for the specified + search text. Target fields must be included in the specified suggester. + :paramtype search_fields: str + :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key + field will be included in the results. + :paramtype select: str + :keyword suggester_name: Required. The name of the suggester as specified in the suggesters + collection that's part of the index definition. + :paramtype suggester_name: str + :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100. 
+ The default is 5. + :paramtype top: int + """ super(SuggestRequest, self).__init__(**kwargs) self.filter = kwargs.get('filter', None) self.use_fuzzy_matching = kwargs.get('use_fuzzy_matching', None) @@ -1133,9 +1519,9 @@ class SuggestResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar text: Required. The text of the suggestion result. :vartype text: str """ @@ -1153,6 +1539,11 @@ def __init__( self, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(SuggestResult, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.text = None diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py index e5e0ece35849..1f17711c1f28 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py @@ -19,9 +19,9 @@ class AnswerResult(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. 
- :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar score: The score value represents how relevant the answer is to the the query relative to other answers returned for the query. :vartype score: float @@ -55,6 +55,11 @@ def __init__( additional_properties: Optional[Dict[str, Any]] = None, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(AnswerResult, self).__init__(**kwargs) self.additional_properties = additional_properties self.score = None @@ -90,6 +95,8 @@ def __init__( self, **kwargs ): + """ + """ super(AutocompleteItem, self).__init__(**kwargs) self.text = None self.query_plus_text = None @@ -98,36 +105,36 @@ def __init__( class AutocompleteOptions(msrest.serialization.Model): """Parameter group. - :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". - :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword filter: An OData expression that filters the documents used to produce completed terms + :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :ivar filter: An OData expression that filters the documents used to produce completed terms for the Autocomplete result. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - autocomplete query. Default is false. When set to true, the query will find terms even if - there's a substituted or missing character in the search text. 
While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete + query. Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and + consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by an autocomplete query in order for the query to be reported as a - success. This parameter can be useful for ensuring search availability even for services with - only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword search_fields: The list of field names to consider when querying for auto-completed + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by an autocomplete query in order for the query to be reported as a success. 
+ This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :vartype minimum_coverage: float + :ivar search_fields: The list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. - :paramtype search_fields: list[str] - :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 - and 100. The default is 5. - :paramtype top: int + :vartype search_fields: list[str] + :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and + 100. The default is 5. + :vartype top: int """ _attribute_map = { @@ -154,6 +161,38 @@ def __init__( top: Optional[int] = None, **kwargs ): + """ + :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing + auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". + :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :keyword filter: An OData expression that filters the documents used to produce completed terms + for the Autocomplete result. + :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + autocomplete query. Default is false. When set to true, the query will find terms even if + there's a substituted or missing character in the search text. While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled. 
+ :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by an autocomplete query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for services with + only one replica. The default is 80. + :paramtype minimum_coverage: float + :keyword search_fields: The list of field names to consider when querying for auto-completed + terms. Target fields must be included in the specified suggester. + :paramtype search_fields: list[str] + :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. The default is 5. + :paramtype top: int + """ super(AutocompleteOptions, self).__init__(**kwargs) self.autocomplete_mode = autocomplete_mode self.filter = filter @@ -170,41 +209,41 @@ class AutocompleteRequest(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword search_text: Required. The search text on which to base autocomplete results. - :paramtype search_text: str - :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + :ivar search_text: Required. The search text on which to base autocomplete results. + :vartype search_text: str + :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". 
- :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword filter: An OData expression that filters the documents used to produce completed terms + :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :ivar filter: An OData expression that filters the documents used to produce completed terms for the Autocomplete result. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - autocomplete query. Default is false. When set to true, the query will autocomplete terms even - if there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete + query. Default is false. When set to true, the query will autocomplete terms even if there's a + substituted or missing character in the search text. While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and + consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. 
- :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by an autocomplete query in order for the query to be reported as a - success. This parameter can be useful for ensuring search availability even for services with - only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword search_fields: The comma-separated list of field names to consider when querying for + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by an autocomplete query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :vartype minimum_coverage: float + :ivar search_fields: The comma-separated list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. - :paramtype search_fields: str - :keyword suggester_name: Required. The name of the suggester as specified in the suggesters + :vartype search_fields: str + :ivar suggester_name: Required. The name of the suggester as specified in the suggesters collection that's part of the index definition. - :paramtype suggester_name: str - :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 - and 100. The default is 5. - :paramtype top: int + :vartype suggester_name: str + :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and + 100. The default is 5. + :vartype top: int """ _validation = { @@ -240,6 +279,43 @@ def __init__( top: Optional[int] = None, **kwargs ): + """ + :keyword search_text: Required. The search text on which to base autocomplete results. + :paramtype search_text: str + :keyword autocomplete_mode: Specifies the mode for Autocomplete. 
The default is 'oneTerm'. Use + 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing + auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext". + :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :keyword filter: An OData expression that filters the documents used to produce completed terms + for the Autocomplete result. + :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + autocomplete query. Default is false. When set to true, the query will autocomplete terms even + if there's a substituted or missing character in the search text. While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by an autocomplete query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for services with + only one replica. The default is 80. + :paramtype minimum_coverage: float + :keyword search_fields: The comma-separated list of field names to consider when querying for + auto-completed terms. Target fields must be included in the specified suggester. + :paramtype search_fields: str + :keyword suggester_name: Required. 
The name of the suggester as specified in the suggesters + collection that's part of the index definition. + :paramtype suggester_name: str + :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. The default is 5. + :paramtype top: int + """ super(AutocompleteRequest, self).__init__(**kwargs) self.search_text = search_text self.autocomplete_mode = autocomplete_mode @@ -281,6 +357,8 @@ def __init__( self, **kwargs ): + """ + """ super(AutocompleteResult, self).__init__(**kwargs) self.coverage = None self.results = None @@ -291,9 +369,9 @@ class CaptionResult(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar text: A representative text passage extracted from the document most relevant to the search query. :vartype text: str @@ -319,6 +397,11 @@ def __init__( additional_properties: Optional[Dict[str, Any]] = None, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(CaptionResult, self).__init__(**kwargs) self.additional_properties = additional_properties self.text = None @@ -330,9 +413,9 @@ class FacetResult(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. 
- :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar count: The approximate count of documents falling within the bucket described by this facet. :vartype count: long @@ -353,6 +436,11 @@ def __init__( additional_properties: Optional[Dict[str, Any]] = None, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(FacetResult, self).__init__(**kwargs) self.additional_properties = additional_properties self.count = None @@ -361,12 +449,12 @@ def __init__( class IndexAction(msrest.serialization.Model): """Represents an index action that operates on a document. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] - :keyword action_type: The operation to perform on a document in an indexing batch. Possible - values include: "upload", "merge", "mergeOrUpload", "delete". - :paramtype action_type: str or ~azure.search.documents.models.IndexActionType + :vartype additional_properties: dict[str, any] + :ivar action_type: The operation to perform on a document in an indexing batch. Possible values + include: "upload", "merge", "mergeOrUpload", "delete". + :vartype action_type: str or ~azure.search.documents.models.IndexActionType """ _attribute_map = { @@ -381,6 +469,14 @@ def __init__( action_type: Optional[Union[str, "IndexActionType"]] = None, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword action_type: The operation to perform on a document in an indexing batch. Possible + values include: "upload", "merge", "mergeOrUpload", "delete". 
+ :paramtype action_type: str or ~azure.search.documents.models.IndexActionType + """ super(IndexAction, self).__init__(**kwargs) self.additional_properties = additional_properties self.action_type = action_type @@ -391,8 +487,8 @@ class IndexBatch(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword actions: Required. The actions in the batch. - :paramtype actions: list[~azure.search.documents.models.IndexAction] + :ivar actions: Required. The actions in the batch. + :vartype actions: list[~azure.search.documents.models.IndexAction] """ _validation = { @@ -409,6 +505,10 @@ def __init__( actions: List["IndexAction"], **kwargs ): + """ + :keyword actions: Required. The actions in the batch. + :paramtype actions: list[~azure.search.documents.models.IndexAction] + """ super(IndexBatch, self).__init__(**kwargs) self.actions = actions @@ -437,6 +537,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexDocumentsResult, self).__init__(**kwargs) self.results = None @@ -481,6 +583,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexingResult, self).__init__(**kwargs) self.key = None self.error_message = None @@ -491,8 +595,8 @@ def __init__( class RequestOptions(msrest.serialization.Model): """Parameter group. - :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :paramtype x_ms_client_request_id: str + :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :vartype x_ms_client_request_id: str """ _attribute_map = { @@ -505,6 +609,10 @@ def __init__( x_ms_client_request_id: Optional[str] = None, **kwargs ): + """ + :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. 
+ :paramtype x_ms_client_request_id: str + """ super(RequestOptions, self).__init__(**kwargs) self.x_ms_client_request_id = x_ms_client_request_id @@ -568,6 +676,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchDocumentsResult, self).__init__(**kwargs) self.count = None self.coverage = None @@ -609,6 +719,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchError, self).__init__(**kwargs) self.code = None self.message = None @@ -618,98 +730,97 @@ def __init__( class SearchOptions(msrest.serialization.Model): """Parameter group. - :keyword include_total_result_count: A value that specifies whether to fetch the total count of + :ivar include_total_result_count: A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. - :paramtype include_total_result_count: bool - :keyword facets: The list of facet expressions to apply to the search query. Each facet - expression contains a field name, optionally followed by a comma-separated list of name:value - pairs. - :paramtype facets: list[str] - :keyword filter: The OData $filter expression to apply to the search query. - :paramtype filter: str - :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable + :vartype include_total_result_count: bool + :ivar facets: The list of facet expressions to apply to the search query. Each facet expression + contains a field name, optionally followed by a comma-separated list of name:value pairs. + :vartype facets: list[str] + :ivar filter: The OData $filter expression to apply to the search query. + :vartype filter: str + :ivar highlight_fields: The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. - :paramtype highlight_fields: list[str] - :keyword highlight_post_tag: A string tag that is appended to hit highlights. 
Must be set with + :vartype highlight_fields: list[str] + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a search query in order for the query to be reported as a success. This + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. - :paramtype minimum_coverage: float - :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + :vartype minimum_coverage: float + :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, and desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no OrderBy is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: list[str] - :keyword query_type: A value that specifies the syntax of the search query. The default is + :vartype order_by: list[str] + :ivar query_type: A value that specifies the syntax of the search query. The default is 'simple'. 
Use 'full' if your query uses the Lucene query syntax. Possible values include: "simple", "full", "semantic". - :paramtype query_type: str or ~azure.search.documents.models.QueryType - :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + :vartype query_type: str or ~azure.search.documents.models.QueryType + :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). - :paramtype scoring_parameters: list[str] - :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + :vartype scoring_parameters: list[str] + :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. - :paramtype scoring_profile: str - :keyword search_fields: The list of field names to which to scope the full-text search. When - using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of - each fielded search expression take precedence over any field names listed in this parameter. - :paramtype search_fields: list[str] - :keyword query_language: The language of the query. Possible values include: "none", "en-us". - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: Improve search recall by spell-correcting individual search query terms. + :vartype scoring_profile: str + :ivar search_fields: The list of field names to which to scope the full-text search. When using + fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each + fielded search expression take precedence over any field names listed in this parameter. 
+ :vartype search_fields: list[str] + :ivar query_language: The language of the query. Possible values include: "none", "en-us". + :vartype query_language: str or ~azure.search.documents.models.QueryLanguage + :ivar speller: Improve search recall by spell-correcting individual search query terms. Possible values include: "none", "lexicon". - :paramtype speller: str or ~azure.search.documents.models.Speller - :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the - query returns answers extracted from key passages in the highest ranked documents. The number - of answers returned can be configured by appending the pipe character '|' followed by the + :vartype speller: str or ~azure.search.documents.models.Speller + :ivar answers: This parameter is only valid if the query type is 'semantic'. If set, the query + returns answers extracted from key passages in the highest ranked documents. The number of + answers returned can be configured by appending the pipe character '|' followed by the 'count-:code:``' option after the answers parameter value, such as 'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive". - :paramtype answers: str or ~azure.search.documents.models.Answers - :keyword search_mode: A value that specifies whether any or all of the search terms must be + :vartype answers: str or ~azure.search.documents.models.Answers + :ivar search_mode: A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Possible values include: "any", "all". 
- :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + :vartype search_mode: str or ~azure.search.documents.models.SearchMode + :ivar scoring_statistics: A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. Possible values include: "local", "global". - :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :keyword session_id: A value to be used to create a sticky session, which can help to get more + :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :ivar session_id: A value to be used to create a sticky session, which can help to get more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. - :paramtype session_id: str - :keyword select: The list of fields to retrieve. If unspecified, all fields marked as - retrievable in the schema are included. - :paramtype select: list[str] - :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. - If you need to scan documents in sequence, but cannot use $skip due to this limitation, - consider using $orderby on a totally-ordered key and $filter with a range query instead. - :paramtype skip: int - :keyword top: The number of search results to retrieve. This can be used in conjunction with - $skip to implement client-side paging of search results. 
If results are truncated due to - server-side paging, the response will include a continuation token that can be used to issue - another Search request for the next page of results. - :paramtype top: int - :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the - query returns captions extracted from key passages in the highest ranked documents. When - Captions is set to 'extractive', highlighting is enabled by default, and can be configured by - appending the pipe character '|' followed by the 'highlight-' option, such as + :vartype session_id: str + :ivar select: The list of fields to retrieve. If unspecified, all fields marked as retrievable + in the schema are included. + :vartype select: list[str] + :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If + you need to scan documents in sequence, but cannot use $skip due to this limitation, consider + using $orderby on a totally-ordered key and $filter with a range query instead. + :vartype skip: int + :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip + to implement client-side paging of search results. If results are truncated due to server-side + paging, the response will include a continuation token that can be used to issue another Search + request for the next page of results. + :vartype top: int + :ivar captions: This parameter is only valid if the query type is 'semantic'. If set, the query + returns captions extracted from key passages in the highest ranked documents. When Captions is + set to 'extractive', highlighting is enabled by default, and can be configured by appending the + pipe character '|' followed by the 'highlight-' option, such as 'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive". 
- :paramtype captions: str or ~azure.search.documents.models.Captions - :keyword semantic_fields: The list of field names used for semantic search. - :paramtype semantic_fields: list[str] + :vartype captions: str or ~azure.search.documents.models.Captions + :ivar semantic_fields: The list of field names used for semantic search. + :vartype semantic_fields: list[str] """ _attribute_map = { @@ -766,6 +877,100 @@ def __init__( semantic_fields: Optional[List[str]] = None, **kwargs ): + """ + :keyword include_total_result_count: A value that specifies whether to fetch the total count of + results. Default is false. Setting this value to true may have a performance impact. Note that + the count returned is an approximation. + :paramtype include_total_result_count: bool + :keyword facets: The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list of name:value + pairs. + :paramtype facets: list[str] + :keyword filter: The OData $filter expression to apply to the search query. + :paramtype filter: str + :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable + fields can be used for hit highlighting. + :paramtype highlight_fields: list[str] + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a search query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one + replica. The default is 100. 
+ :paramtype minimum_coverage: float + :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + expression can be either a field name or a call to either the geo.distance() or the + search.score() functions. Each expression can be followed by asc to indicate ascending, and + desc to indicate descending. The default is ascending order. Ties will be broken by the match + scores of documents. If no OrderBy is specified, the default sort order is descending by + document match score. There can be at most 32 $orderby clauses. + :paramtype order_by: list[str] + :keyword query_type: A value that specifies the syntax of the search query. The default is + 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include: + "simple", "full", "semantic". + :paramtype query_type: str or ~azure.search.documents.models.QueryType + :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + example, referencePointParameter) using the format name-values. For example, if the scoring + profile defines a function with a parameter called 'mylocation' the parameter string would be + "mylocation--122.2,44.8" (without the quotes). + :paramtype scoring_parameters: list[str] + :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + documents in order to sort the results. + :paramtype scoring_profile: str + :keyword search_fields: The list of field names to which to scope the full-text search. When + using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of + each fielded search expression take precedence over any field names listed in this parameter. + :paramtype search_fields: list[str] + :keyword query_language: The language of the query. Possible values include: "none", "en-us". 
+ :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage + :keyword speller: Improve search recall by spell-correcting individual search query terms. + Possible values include: "none", "lexicon". + :paramtype speller: str or ~azure.search.documents.models.Speller + :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the + query returns answers extracted from key passages in the highest ranked documents. The number + of answers returned can be configured by appending the pipe character '|' followed by the + 'count-:code:`<number of answers>`' option after the answers parameter value, such as + 'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive". + :paramtype answers: str or ~azure.search.documents.models.Answers + :keyword search_mode: A value that specifies whether any or all of the search terms must be + matched in order to count the document as a match. Possible values include: "any", "all". + :paramtype search_mode: str or ~azure.search.documents.models.SearchMode + :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + statistics (such as document frequency) globally for more consistent scoring, or locally, for + lower latency. Possible values include: "local", "global". + :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :keyword session_id: A value to be used to create a sticky session, which can help to get more + consistent results. As long as the same sessionId is used, a best-effort attempt will be made + to target the same replica set. Be wary that reusing the same sessionID values repeatedly can + interfere with the load balancing of the requests across replicas and adversely affect the + performance of the search service. The value used as sessionId cannot start with a '_' + character. + :paramtype session_id: str + :keyword select: The list of fields to retrieve.
If unspecified, all fields marked as + retrievable in the schema are included. + :paramtype select: list[str] + :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. + If you need to scan documents in sequence, but cannot use $skip due to this limitation, + consider using $orderby on a totally-ordered key and $filter with a range query instead. + :paramtype skip: int + :keyword top: The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are truncated due to + server-side paging, the response will include a continuation token that can be used to issue + another Search request for the next page of results. + :paramtype top: int + :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the + query returns captions extracted from key passages in the highest ranked documents. When + Captions is set to 'extractive', highlighting is enabled by default, and can be configured by + appending the pipe character '|' followed by the 'highlight-' option, such as + 'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive". + :paramtype captions: str or ~azure.search.documents.models.Captions + :keyword semantic_fields: The list of field names used for semantic search. + :paramtype semantic_fields: list[str] + """ super(SearchOptions, self).__init__(**kwargs) self.include_total_result_count = include_total_result_count self.facets = facets @@ -795,99 +1000,98 @@ def __init__( class SearchRequest(msrest.serialization.Model): """Parameters for filtering, sorting, faceting, paging, and other search query behaviors. - :keyword include_total_result_count: A value that specifies whether to fetch the total count of + :ivar include_total_result_count: A value that specifies whether to fetch the total count of results. Default is false. 
Setting this value to true may have a performance impact. Note that the count returned is an approximation. - :paramtype include_total_result_count: bool - :keyword facets: The list of facet expressions to apply to the search query. Each facet - expression contains a field name, optionally followed by a comma-separated list of name:value - pairs. - :paramtype facets: list[str] - :keyword filter: The OData $filter expression to apply to the search query. - :paramtype filter: str - :keyword highlight_fields: The comma-separated list of field names to use for hit highlights. - Only searchable fields can be used for hit highlighting. - :paramtype highlight_fields: str - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :vartype include_total_result_count: bool + :ivar facets: The list of facet expressions to apply to the search query. Each facet expression + contains a field name, optionally followed by a comma-separated list of name:value pairs. + :vartype facets: list[str] + :ivar filter: The OData $filter expression to apply to the search query. + :vartype filter: str + :ivar highlight_fields: The comma-separated list of field names to use for hit highlights. Only + searchable fields can be used for hit highlighting. + :vartype highlight_fields: str + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a search query in order for the query to be reported as a success. 
This + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. - :paramtype minimum_coverage: float - :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + :vartype minimum_coverage: float + :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: str - :keyword query_type: A value that specifies the syntax of the search query. The default is + :vartype order_by: str + :ivar query_type: A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include: "simple", "full", "semantic". - :paramtype query_type: str or ~azure.search.documents.models.QueryType - :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + :vartype query_type: str or ~azure.search.documents.models.QueryType + :ivar scoring_statistics: A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. 
Possible values include: "local", "global". - :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :keyword session_id: A value to be used to create a sticky session, which can help getting more + :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :ivar session_id: A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. - :paramtype session_id: str - :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + :vartype session_id: str + :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). - :paramtype scoring_parameters: list[str] - :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + :vartype scoring_parameters: list[str] + :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. - :paramtype scoring_profile: str - :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to - match all documents. 
- :paramtype search_text: str - :keyword search_fields: The comma-separated list of field names to which to scope the full-text + all documents. + :vartype search_text: str + :ivar search_fields: The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. - :paramtype search_fields: str - :keyword search_mode: A value that specifies whether any or all of the search terms must be + :vartype search_fields: str + :ivar search_mode: A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Possible values include: "any", "all". - :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword query_language: A value that specifies the language of the search query. Possible - values include: "none", "en-us". - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: A value that specified the type of the speller to use to spell-correct + :vartype search_mode: str or ~azure.search.documents.models.SearchMode + :ivar query_language: A value that specifies the language of the search query. Possible values + include: "none", "en-us". + :vartype query_language: str or ~azure.search.documents.models.QueryLanguage + :ivar speller: A value that specifies the type of the speller to use to spell-correct
- :paramtype answers: str or ~azure.search.documents.models.Answers - :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields - marked as retrievable in the schema are included. - :paramtype select: str - :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. - If you need to scan documents in sequence, but cannot use skip due to this limitation, consider + :vartype speller: str or ~azure.search.documents.models.QuerySpellerType + :ivar answers: A value that specifies whether answers should be returned as part of the search + response. Possible values include: "none", "extractive". + :vartype answers: str or ~azure.search.documents.models.QueryAnswerType + :ivar select: The comma-separated list of fields to retrieve. If unspecified, all fields marked + as retrievable in the schema are included. + :vartype select: str + :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If + you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. - :paramtype skip: int - :keyword top: The number of search results to retrieve. This can be used in conjunction with - $skip to implement client-side paging of search results. If results are truncated due to - server-side paging, the response will include a continuation token that can be used to issue - another Search request for the next page of results. - :paramtype top: int - :keyword captions: A value that specifies whether captions should be returned as part of the + :vartype skip: int + :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip + to implement client-side paging of search results. 
If results are truncated due to server-side + paging, the response will include a continuation token that can be used to issue another Search + request for the next page of results. + :vartype top: int + :ivar captions: A value that specifies whether captions should be returned as part of the search response. Possible values include: "none", "extractive". - :paramtype captions: str or ~azure.search.documents.models.Captions - :keyword semantic_fields: The comma-separated list of field names used for semantic search. - :paramtype semantic_fields: str + :vartype captions: str or ~azure.search.documents.models.QueryCaptionType + :ivar semantic_fields: The comma-separated list of field names used for semantic search. + :vartype semantic_fields: str """ _attribute_map = { @@ -937,15 +1141,110 @@ def __init__( search_fields: Optional[str] = None, search_mode: Optional[Union[str, "SearchMode"]] = None, query_language: Optional[Union[str, "QueryLanguage"]] = None, - speller: Optional[Union[str, "Speller"]] = None, - answers: Optional[Union[str, "Answers"]] = None, + speller: Optional[Union[str, "QuerySpellerType"]] = None, + answers: Optional[Union[str, "QueryAnswerType"]] = None, select: Optional[str] = None, skip: Optional[int] = None, top: Optional[int] = None, - captions: Optional[Union[str, "Captions"]] = None, + captions: Optional[Union[str, "QueryCaptionType"]] = None, semantic_fields: Optional[str] = None, **kwargs ): + """ + :keyword include_total_result_count: A value that specifies whether to fetch the total count of + results. Default is false. Setting this value to true may have a performance impact. Note that + the count returned is an approximation. + :paramtype include_total_result_count: bool + :keyword facets: The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list of name:value + pairs. 
+ :paramtype facets: list[str] + :keyword filter: The OData $filter expression to apply to the search query. + :paramtype filter: str + :keyword highlight_fields: The comma-separated list of field names to use for hit highlights. + Only searchable fields can be used for hit highlighting. + :paramtype highlight_fields: str + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a search query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one + replica. The default is 100. + :paramtype minimum_coverage: float + :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the geo.distance() or + the search.score() functions. Each expression can be followed by asc to indicate ascending, or + desc to indicate descending. The default is ascending order. Ties will be broken by the match + scores of documents. If no $orderby is specified, the default sort order is descending by + document match score. There can be at most 32 $orderby clauses. + :paramtype order_by: str + :keyword query_type: A value that specifies the syntax of the search query. The default is + 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include: + "simple", "full", "semantic". 
+ :paramtype query_type: str or ~azure.search.documents.models.QueryType + :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + statistics (such as document frequency) globally for more consistent scoring, or locally, for + lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally + before scoring. Using global scoring statistics can increase latency of search queries. + Possible values include: "local", "global". + :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :keyword session_id: A value to be used to create a sticky session, which can help getting more + consistent results. As long as the same sessionId is used, a best-effort attempt will be made + to target the same replica set. Be wary that reusing the same sessionID values repeatedly can + interfere with the load balancing of the requests across replicas and adversely affect the + performance of the search service. The value used as sessionId cannot start with a '_' + character. + :paramtype session_id: str + :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + example, referencePointParameter) using the format name-values. For example, if the scoring + profile defines a function with a parameter called 'mylocation' the parameter string would be + "mylocation--122.2,44.8" (without the quotes). + :paramtype scoring_parameters: list[str] + :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + documents in order to sort the results. + :paramtype scoring_profile: str + :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to + match all documents. + :paramtype search_text: str + :keyword search_fields: The comma-separated list of field names to which to scope the full-text + search. 
When using fielded search (fieldName:searchExpression) in a full Lucene query, the + field names of each fielded search expression take precedence over any field names listed in + this parameter. + :paramtype search_fields: str + :keyword search_mode: A value that specifies whether any or all of the search terms must be + matched in order to count the document as a match. Possible values include: "any", "all". + :paramtype search_mode: str or ~azure.search.documents.models.SearchMode + :keyword query_language: A value that specifies the language of the search query. Possible + values include: "none", "en-us". + :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage + :keyword speller: A value that specified the type of the speller to use to spell-correct + individual search query terms. Possible values include: "none", "lexicon". + :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType + :keyword answers: A value that specifies whether answers should be returned as part of the + search response. Possible values include: "none", "extractive". + :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType + :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields + marked as retrievable in the schema are included. + :paramtype select: str + :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. + If you need to scan documents in sequence, but cannot use skip due to this limitation, consider + using orderby on a totally-ordered key and filter with a range query instead. + :paramtype skip: int + :keyword top: The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are truncated due to + server-side paging, the response will include a continuation token that can be used to issue + another Search request for the next page of results. 
+ :paramtype top: int + :keyword captions: A value that specifies whether captions should be returned as part of the + search response. Possible values include: "none", "extractive". + :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType + :keyword semantic_fields: The comma-separated list of field names used for semantic search. + :paramtype semantic_fields: str + """ super(SearchRequest, self).__init__(**kwargs) self.include_total_result_count = include_total_result_count self.facets = facets @@ -980,9 +1279,9 @@ class SearchResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar score: Required. The relevance score of the document compared to other documents returned by the query. :vartype score: float @@ -1020,6 +1319,11 @@ def __init__( additional_properties: Optional[Dict[str, Any]] = None, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + """ super(SearchResult, self).__init__(**kwargs) self.additional_properties = additional_properties self.score = None @@ -1056,6 +1360,8 @@ def __init__( self, **kwargs ): + """ + """ super(SuggestDocumentsResult, self).__init__(**kwargs) self.results = None self.coverage = None @@ -1064,41 +1370,41 @@ def __init__( class SuggestOptions(msrest.serialization.Model): """Parameter group. - :keyword filter: An OData expression that filters the documents considered for suggestions. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - suggestions query. 
Default is false. When set to true, the query will find terms even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :ivar filter: An OData expression that filters the documents considered for suggestions. + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions + query. Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and + consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a suggestions query in order for the query to be reported as a success. 
- This parameter can be useful for ensuring search availability even for services with only one + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a suggestions query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + :vartype minimum_coverage: float + :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: list[str] - :keyword search_fields: The list of field names to search for the specified search text. Target + :vartype order_by: list[str] + :ivar search_fields: The list of field names to search for the specified search text. Target fields must be included in the specified suggester. - :paramtype search_fields: list[str] - :keyword select: The list of fields to retrieve. If unspecified, only the key field will be + :vartype search_fields: list[str] + :ivar select: The list of fields to retrieve. If unspecified, only the key field will be included in the results. - :paramtype select: list[str] - :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and - 100. The default is 5. - :paramtype top: int + :vartype select: list[str] + :ivar top: The number of suggestions to retrieve. 
The value must be a number between 1 and 100. + The default is 5. + :vartype top: int """ _attribute_map = { @@ -1127,6 +1433,43 @@ def __init__( top: Optional[int] = None, **kwargs ): + """ + :keyword filter: An OData expression that filters the documents considered for suggestions. + :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + suggestions query. Default is false. When set to true, the query will find terms even if + there's a substituted or missing character in the search text. While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting of suggestions is disabled. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting of suggestions is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a suggestions query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :paramtype minimum_coverage: float + :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + expression can be either a field name or a call to either the geo.distance() or the + search.score() functions. Each expression can be followed by asc to indicate ascending, or desc + to indicate descending. The default is ascending order. Ties will be broken by the match scores + of documents. 
If no $orderby is specified, the default sort order is descending by document + match score. There can be at most 32 $orderby clauses. + :paramtype order_by: list[str] + :keyword search_fields: The list of field names to search for the specified search text. Target + fields must be included in the specified suggester. + :paramtype search_fields: list[str] + :keyword select: The list of fields to retrieve. If unspecified, only the key field will be + included in the results. + :paramtype select: list[str] + :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and + 100. The default is 5. + :paramtype top: int + """ super(SuggestOptions, self).__init__(**kwargs) self.filter = filter self.use_fuzzy_matching = use_fuzzy_matching @@ -1144,47 +1487,47 @@ class SuggestRequest(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword filter: An OData expression that filters the documents considered for suggestions. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - suggestion query. Default is false. When set to true, the query will find suggestions even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + :ivar filter: An OData expression that filters the documents considered for suggestions. + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestion + query. Default is false. When set to true, the query will find suggestions even if there's a + substituted or missing character in the search text. 
While this provides a better experience in + some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and + consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a suggestion query in order for the query to be reported as a success. - This parameter can be useful for ensuring search availability even for services with only one + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be covered by a suggestion query in order for the query to be reported as a success. This + parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + :vartype minimum_coverage: float + :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. 
If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: str - :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1 + :vartype order_by: str + :ivar search_text: Required. The search text to use to suggest documents. Must be at least 1 character, and no more than 100 characters. - :paramtype search_text: str - :keyword search_fields: The comma-separated list of field names to search for the specified - search text. Target fields must be included in the specified suggester. - :paramtype search_fields: str - :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key + :vartype search_text: str + :ivar search_fields: The comma-separated list of field names to search for the specified search + text. Target fields must be included in the specified suggester. + :vartype search_fields: str + :ivar select: The comma-separated list of fields to retrieve. If unspecified, only the key field will be included in the results. - :paramtype select: str - :keyword suggester_name: Required. The name of the suggester as specified in the suggesters + :vartype select: str + :ivar suggester_name: Required. The name of the suggester as specified in the suggesters collection that's part of the index definition. - :paramtype suggester_name: str - :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100. - The default is 5. - :paramtype top: int + :vartype suggester_name: str + :ivar top: The number of suggestions to retrieve. This must be a value between 1 and 100. The + default is 5. + :vartype top: int """ _validation = { @@ -1222,6 +1565,49 @@ def __init__( top: Optional[int] = None, **kwargs ): + """ + :keyword filter: An OData expression that filters the documents considered for suggestions. 
+ :paramtype filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + suggestion query. Default is false. When set to true, the query will find suggestions even if + there's a substituted or missing character in the search text. While this provides a better + experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are + slower and consume more resources. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting of suggestions is disabled. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting of suggestions is disabled. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be covered by a suggestion query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services with only one + replica. The default is 80. + :paramtype minimum_coverage: float + :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the geo.distance() or + the search.score() functions. Each expression can be followed by asc to indicate ascending, or + desc to indicate descending. The default is ascending order. Ties will be broken by the match + scores of documents. If no $orderby is specified, the default sort order is descending by + document match score. There can be at most 32 $orderby clauses. + :paramtype order_by: str + :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1 + character, and no more than 100 characters. 
+ :paramtype search_text: str + :keyword search_fields: The comma-separated list of field names to search for the specified + search text. Target fields must be included in the specified suggester. + :paramtype search_fields: str + :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key + field will be included in the results. + :paramtype select: str + :keyword suggester_name: Required. The name of the suggester as specified in the suggesters + collection that's part of the index definition. + :paramtype suggester_name: str + :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100. + The default is 5. + :paramtype top: int + """ super(SuggestRequest, self).__init__(**kwargs) self.filter = filter self.use_fuzzy_matching = use_fuzzy_matching @@ -1243,9 +1629,9 @@ class SuggestResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] + :vartype additional_properties: dict[str, any] :ivar text: Required. The text of the suggestion result. :vartype text: str """ @@ -1265,6 +1651,11 @@ def __init__( additional_properties: Optional[Dict[str, Any]] = None, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. 
+ :paramtype additional_properties: dict[str, any] + """ super(SuggestResult, self).__init__(**kwargs) self.additional_properties = additional_properties self.text = None diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py index 29a5bdc2c30d..5a07c4944d99 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py @@ -12,12 +12,6 @@ class Answers(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - """This parameter is only valid if the query type is 'semantic'. If set, the query returns answers - extracted from key passages in the highest ranked documents. The number of answers returned can - be configured by appending the pipe character '|' followed by the 'count-:code:``' option after the answers parameter value, such as 'extractive|count-3'. Default - count is 1. - """ #: Do not return answers for the query. NONE = "none" @@ -43,12 +37,6 @@ class AutocompleteMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): ONE_TERM_WITH_CONTEXT = "oneTermWithContext" class Captions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - """This parameter is only valid if the query type is 'semantic'. If set, the query returns - captions extracted from key passages in the highest ranked documents. When Captions is set to - 'extractive', highlighting is enabled by default, and can be configured by appending the pipe - character '|' followed by the 'highlight-' option, such as - 'extractive|highlight-true'. Defaults to 'None'. - """ #: Do not return captions for the query. NONE = "none" @@ -75,6 +63,34 @@ class IndexActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): #: document, use merge instead and set the field explicitly to null. 
DELETE = "delete" +class QueryAnswerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """This parameter is only valid if the query type is 'semantic'. If set, the query returns answers + extracted from key passages in the highest ranked documents. The number of answers returned can + be configured by appending the pipe character '|' followed by the 'count-:code:``' option after the answers parameter value, such as 'extractive|count-3'. Default + count is 1. + """ + + #: Do not return answers for the query. + NONE = "none" + #: Extracts answer candidates from the contents of the documents returned in response to a query + #: expressed as a question in natural language. + EXTRACTIVE = "extractive" + +class QueryCaptionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """This parameter is only valid if the query type is 'semantic'. If set, the query returns + captions extracted from key passages in the highest ranked documents. When Captions is set to + 'extractive', highlighting is enabled by default, and can be configured by appending the pipe + character '|' followed by the 'highlight-' option, such as + 'extractive|highlight-true'. Defaults to 'None'. + """ + + #: Do not return captions for the query. + NONE = "none" + #: Extracts captions from the matching documents that contain passages relevant to the search + #: query. + EXTRACTIVE = "extractive" + class QueryLanguage(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The language of the query. """ @@ -84,6 +100,16 @@ class QueryLanguage(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): #: English. EN_US = "en-us" +class QuerySpellerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """Improve search recall by spell-correcting individual search query terms. + """ + + #: Speller not enabled. + NONE = "none" + #: Speller corrects individual query terms using a static lexicon for the language specified by + #: the queryLanguage parameter. 
+ LEXICON = "lexicon" + class QueryType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax and 'semantic' if query syntax is not needed. @@ -125,8 +151,6 @@ class SearchMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): ALL = "all" class Speller(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - """Improve search recall by spell-correcting individual search query terms. - """ #: Speller not enabled. NONE = "none" diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py index 39c09ccee60a..3bcd60f1e51d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py @@ -12,12 +12,12 @@ from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse -from azure.core.pipeline.transport._base import _format_url_section from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. 
import models as _models +from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -502,7 +502,8 @@ def count( request = build_count_request( x_ms_client_request_id=_x_ms_client_request_id, template_url=self.count.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -514,7 +515,7 @@ def count( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('long', pipeline_response) @@ -634,7 +635,8 @@ def search_get( semantic_fields=_semantic_fields, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.search_get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -646,7 +648,7 @@ def search_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchDocumentsResult', pipeline_response) @@ -696,7 +698,8 @@ def 
search_post( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.search_post.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -708,7 +711,7 @@ def search_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchDocumentsResult', pipeline_response) @@ -759,7 +762,8 @@ def get( selected_fields=selected_fields, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -771,7 +775,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('object', pipeline_response) @@ -854,7 +858,8 @@ def suggest_get( top=_top, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.suggest_get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) 
path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -866,7 +871,7 @@ def suggest_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response) @@ -916,7 +921,8 @@ def suggest_post( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.suggest_post.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -928,7 +934,7 @@ def suggest_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response) @@ -979,7 +985,8 @@ def index( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.index.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", 
self._config.index_name, 'str'), @@ -991,7 +998,7 @@ def index( if response.status_code not in [200, 207]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -1074,7 +1081,8 @@ def autocomplete_get( search_fields=_search_fields, top=_top, template_url=self.autocomplete_get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -1086,7 +1094,7 @@ def autocomplete_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AutocompleteResult', pipeline_response) @@ -1136,7 +1144,8 @@ def autocomplete_post( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.autocomplete_post.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), "indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'), @@ -1148,7 +1157,7 @@ def autocomplete_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AutocompleteResult', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py index bb5da1902f3c..79fce398aad8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py @@ -202,17 +202,17 @@ def search(self, search_text, **kwargs): # pylint:disable=too-many-locals :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage :keyword query_speller: A value that specified the type of the speller to use to spell-correct individual search query terms. Possible values include: "none", "lexicon". - :paramtype query_speller: str or ~azure.search.documents.models.Speller + :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType :keyword query_answer: This parameter is only valid if the query type is 'semantic'. If set, the query returns answers extracted from key passages in the highest ranked documents. Possible values include: "none", "extractive". - :paramtype query_answer: str or ~azure.search.documents.models.Answers + :paramtype query_answer: str or ~azure.search.documents.models.QueryAnswerType :keyword int query_answer_count: This parameter is only valid if the query type is 'semantic' and query answer is 'extractive'. Configures the number of answers returned. Default count is 1. :keyword query_caption: This parameter is only valid if the query type is 'semantic'. If set, the query returns captions extracted from key passages in the highest ranked documents. Defaults to 'None'. Possible values include: "none", "extractive". 
- :paramtype query_caption: str or ~azure.search.documents.models.Captions + :paramtype query_caption: str or ~azure.search.documents.models.QueryCaptionType :keyword bool query_caption_highlight: This parameter is only valid if the query type is 'semantic' when query caption is set to 'extractive'. Determines whether highlighting is enabled. Defaults to 'true'. diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py index 7ed9fe24dbf2..65a51b4610da 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py @@ -180,18 +180,18 @@ async def search(self, search_text, **kwargs): # pylint:disable=too-many-locals :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage :keyword query_speller: A value that specified the type of the speller to use to spell-correct individual search query terms. Possible values include: "none", "lexicon". - :paramtype query_speller: str or ~azure.search.documents.models.Speller + :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType :keyword query_answer: This parameter is only valid if the query type is 'semantic'. If set, the query returns answers extracted from key passages in the highest ranked documents. Possible values include: "none", "extractive". - :paramtype query_answer: str or ~azure.search.documents.models.Answers + :paramtype query_answer: str or ~azure.search.documents.models.QueryAnswerType :keyword int query_answer_count: This parameter is only valid if the query type is 'semantic' and query answer is 'extractive'. Configures the number of answers returned. Default count is 1. :keyword query_caption: This parameter is only valid if the query type is 'semantic'. 
If set, the query returns captions extracted from key passages in the highest ranked documents. Defaults to 'None'. Possible values include: "none", "extractive". - :paramtype query_caption: str or ~azure.search.documents.models.Captions + :paramtype query_caption: str or ~azure.search.documents.models.QueryCaptionType :keyword bool query_caption_highlight: This parameter is only valid if the query type is 'semantic' when query caption is set to 'extractive'. Determines whether highlighting is enabled. Defaults to 'true'. diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py new file mode 100644 index 000000000000..138f663c53a4 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py @@ -0,0 +1,27 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.core.pipeline.transport import HttpRequest + +def _convert_request(request, files=None): + data = request.content if not files else None + request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) + if files: + request.set_formdata_body(files) + return request + +def _format_url_section(template, **kwargs): + components = template.split("/") + while components: + try: + return template.format(**kwargs) + except KeyError as key: + formatted_components = template.split("/") + components = [ + c for c in formatted_components if "{}".format(key.args[0]) not in c + ] + template = "/".join(components) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py index 8f4a05b35c10..42ad6b8b96d3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py @@ -16,6 +16,7 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ... 
import models as _models +from ..._vendor import _convert_request from ...operations._data_sources_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request T = TypeVar('T') @@ -50,7 +51,7 @@ async def create_or_update( data_source: "_models.SearchIndexerDataSource", if_match: Optional[str] = None, if_none_match: Optional[str] = None, - ignore_reset_requirements: Optional[bool] = None, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, request_options: Optional["_models.RequestOptions"] = None, **kwargs: Any ) -> "_models.SearchIndexerDataSource": @@ -66,8 +67,8 @@ async def create_or_update( :param if_none_match: Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. :type if_none_match: str - :param ignore_reset_requirements: Ignores cache reset requirements. - :type ignore_reset_requirements: bool + :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :type skip_indexer_reset_requirement_for_cache: bool :param request_options: Parameter group. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -94,10 +95,11 @@ async def create_or_update( x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, - ignore_reset_requirements=ignore_reset_requirements, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -108,7 +110,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -167,7 +169,8 @@ async def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -178,7 +181,7 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -219,7 +222,8 @@ async def get( data_source_name=data_source_name, 
x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -230,7 +234,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) @@ -277,7 +281,8 @@ async def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -288,7 +293,7 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ListDataSourcesResult', pipeline_response) @@ -337,7 +342,8 @@ async def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -348,7 +354,7 @@ async def create( if response.status_code not in [201]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py index 31bdc5b09925..bafbd1894276 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py @@ -16,6 +16,7 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ... 
import models as _models +from ..._vendor import _convert_request from ...operations._indexers_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_get_status_request, build_list_request, build_reset_docs_request, build_reset_request, build_run_request T = TypeVar('T') @@ -75,7 +76,8 @@ async def reset( indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.reset.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -86,7 +88,7 @@ async def reset( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -100,7 +102,7 @@ async def reset_docs( self, indexer_name: str, overwrite: Optional[bool] = False, - keys_or_ids: Optional["_models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema"] = None, + keys_or_ids: Optional["_models.DocumentKeysOrIds"] = None, request_options: Optional["_models.RequestOptions"] = None, **kwargs: Any ) -> None: @@ -112,8 +114,7 @@ async def reset_docs( keys or ids in this payload will be queued to be re-ingested. :type overwrite: bool :param keys_or_ids: - :type keys_or_ids: - ~azure.search.documents.indexes.models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema + :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds :param request_options: Parameter group. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -133,7 +134,7 @@ async def reset_docs( if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id if keys_or_ids is not None: - json = self._serialize.body(keys_or_ids, 'Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema') + json = self._serialize.body(keys_or_ids, 'DocumentKeysOrIds') else: json = None @@ -144,7 +145,8 @@ async def reset_docs( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.reset_docs.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -155,7 +157,7 @@ async def reset_docs( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -196,7 +198,8 @@ async def run( indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.run.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -207,7 +210,7 @@ async def run( if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise 
HttpResponseError(response=response, model=error) if cls: @@ -223,8 +226,8 @@ async def create_or_update( indexer: "_models.SearchIndexer", if_match: Optional[str] = None, if_none_match: Optional[str] = None, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, disable_cache_reprocessing_change_detection: Optional[bool] = None, - ignore_reset_requirements: Optional[bool] = None, request_options: Optional["_models.RequestOptions"] = None, **kwargs: Any ) -> "_models.SearchIndexer": @@ -240,11 +243,11 @@ async def create_or_update( :param if_none_match: Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. :type if_none_match: str + :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :type skip_indexer_reset_requirement_for_cache: bool :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change detection. :type disable_cache_reprocessing_change_detection: bool - :param ignore_reset_requirements: Ignores cache reset requirements. - :type ignore_reset_requirements: bool :param request_options: Parameter group. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -271,11 +274,12 @@ async def create_or_update( x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - ignore_reset_requirements=ignore_reset_requirements, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -286,7 +290,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -345,7 +349,8 @@ async def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -356,7 +361,7 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -397,7 
+402,8 @@ async def get( indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -408,7 +414,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexer', pipeline_response) @@ -455,7 +461,8 @@ async def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -466,7 +473,7 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ListIndexersResult', pipeline_response) @@ -515,7 +522,8 @@ async def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -526,7 +534,7 @@ async def create( 
if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexer', pipeline_response) @@ -571,7 +579,8 @@ async def get_status( indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get_status.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -582,7 +591,7 @@ async def get_status( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerStatus', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py index b392a4a7e928..7297cec7b06d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py @@ -18,6 +18,7 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ... 
import models as _models +from ..._vendor import _convert_request from ...operations._indexes_operations import build_analyze_request, build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_get_statistics_request, build_list_request T = TypeVar('T') @@ -81,7 +82,8 @@ async def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -92,7 +94,7 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndex', pipeline_response) @@ -141,7 +143,8 @@ def prepare_request(next_link=None): select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -156,7 +159,8 @@ def prepare_request(next_link=None): select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=next_link, - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -183,7 +187,7 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -252,7 +256,8 @@ async def create_or_update( if_none_match=if_none_match, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -263,7 +268,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -324,7 +329,8 @@ async def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -335,7 +341,7 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -376,7 +382,8 @@ async def get( index_name=index_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = 
_convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -387,7 +394,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndex', pipeline_response) @@ -432,7 +439,8 @@ async def get_statistics( index_name=index_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get_statistics.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -443,7 +451,7 @@ async def get_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('GetIndexStatisticsResult', pipeline_response) @@ -496,7 +504,8 @@ async def analyze( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.analyze.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -507,7 +516,7 @@ async def analyze( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AnalyzeResult', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py index 259e7f045d3c..ed1c90f5d34f 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py @@ -16,6 +16,7 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ... import models as _models +from ..._vendor import _convert_request from ...operations._search_client_operations import build_get_service_statistics_request T = TypeVar('T') @@ -51,7 +52,8 @@ async def get_service_statistics( request = build_get_service_statistics_request( x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get_service_statistics.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -62,7 +64,7 @@ async def get_service_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ServiceStatistics', pipeline_response) diff 
--git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py index 23db88c2f37a..900829d43761 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -16,7 +16,8 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ... 
import models as _models -from ...operations._skillsets_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request +from ..._vendor import _convert_request +from ...operations._skillsets_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request, build_reset_skills_request T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] @@ -50,8 +51,8 @@ async def create_or_update( skillset: "_models.SearchIndexerSkillset", if_match: Optional[str] = None, if_none_match: Optional[str] = None, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, disable_cache_reprocessing_change_detection: Optional[bool] = None, - ignore_reset_requirements: Optional[bool] = None, request_options: Optional["_models.RequestOptions"] = None, **kwargs: Any ) -> "_models.SearchIndexerSkillset": @@ -68,11 +69,11 @@ async def create_or_update( :param if_none_match: Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. :type if_none_match: str + :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :type skip_indexer_reset_requirement_for_cache: bool :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change detection. :type disable_cache_reprocessing_change_detection: bool - :param ignore_reset_requirements: Ignores cache reset requirements. - :type ignore_reset_requirements: bool :param request_options: Parameter group. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -99,11 +100,12 @@ async def create_or_update( x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - ignore_reset_requirements=ignore_reset_requirements, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -114,7 +116,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -173,7 +175,8 @@ async def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -184,7 +187,7 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -225,7 
+228,8 @@ async def get( skillset_name=skillset_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -236,7 +240,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) @@ -283,7 +287,8 @@ async def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -294,7 +299,7 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ListSkillsetsResult', pipeline_response) @@ -343,7 +348,8 @@ async def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -354,7 +360,7 @@ async 
def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) @@ -366,3 +372,65 @@ async def create( create.metadata = {'url': '/skillsets'} # type: ignore + + @distributed_trace_async + async def reset_skills( + self, + skillset_name: str, + skill_names: Optional[List[str]] = None, + request_options: Optional["_models.RequestOptions"] = None, + **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset to reset. + :type skillset_name: str + :param skill_names: the names of skills to be reset. + :type skill_names: list[str] + :param request_options: Parameter group. 
+ :type request_options: ~azure.search.documents.indexes.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + _skill_names = _models.SkillNames(skill_names=skill_names) + json = self._serialize.body(_skill_names, 'SkillNames') + + request = build_reset_skills_request( + skillset_name=skillset_name, + content_type=content_type, + x_ms_client_request_id=_x_ms_client_request_id, + json=json, + template_url=self.reset_skills.metadata['url'], + ) + request = _convert_request(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + request.url = self._client.format_url(request.url, **path_format_arguments) + + pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + reset_skills.metadata = {'url': '/skillsets(\'{skillsetName}\')/search.resetskills'} # type: ignore + diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py index 78036d8873d1..655129db1551 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py @@ -16,6 +16,7 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ... import models as _models +from ..._vendor import _convert_request from ...operations._synonym_maps_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request T = TypeVar('T') @@ -93,7 +94,8 @@ async def create_or_update( if_none_match=if_none_match, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -104,7 +106,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -163,7 +165,8 @@ async def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } 
@@ -174,7 +177,7 @@ async def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -215,7 +218,8 @@ async def get( synonym_map_name=synonym_map_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -226,7 +230,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SynonymMap', pipeline_response) @@ -273,7 +277,8 @@ async def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -284,7 +289,7 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = 
self._deserialize('ListSynonymMapsResult', pipeline_response) @@ -333,7 +338,8 @@ async def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -344,7 +350,7 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SynonymMap', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py index c358e0dbef5a..cc1a6e75a0d6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py @@ -35,6 +35,7 @@ from ._models_py3 import DistanceScoringFunction from ._models_py3 import DistanceScoringParameters from ._models_py3 import DocumentExtractionSkill + from ._models_py3 import DocumentKeysOrIds from ._models_py3 import EdgeNGramTokenFilter from ._models_py3 import EdgeNGramTokenFilterV2 from ._models_py3 import EdgeNGramTokenizer @@ -87,7 +88,6 @@ from ._models_py3 import OutputFieldMappingEntry from ._models_py3 import PIIDetectionSkill from ._models_py3 import PathHierarchyTokenizerV2 - from ._models_py3 import Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema from ._models_py3 import PatternAnalyzer from ._models_py3 
import PatternCaptureTokenFilter from ._models_py3 import PatternReplaceCharFilter @@ -130,6 +130,7 @@ from ._models_py3 import ShaperSkill from ._models_py3 import ShingleTokenFilter from ._models_py3 import Similarity + from ._models_py3 import SkillNames from ._models_py3 import SnowballTokenFilter from ._models_py3 import SoftDeleteColumnDeletionDetectionPolicy from ._models_py3 import SplitSkill @@ -180,6 +181,7 @@ from ._models import DistanceScoringFunction # type: ignore from ._models import DistanceScoringParameters # type: ignore from ._models import DocumentExtractionSkill # type: ignore + from ._models import DocumentKeysOrIds # type: ignore from ._models import EdgeNGramTokenFilter # type: ignore from ._models import EdgeNGramTokenFilterV2 # type: ignore from ._models import EdgeNGramTokenizer # type: ignore @@ -232,7 +234,6 @@ from ._models import OutputFieldMappingEntry # type: ignore from ._models import PIIDetectionSkill # type: ignore from ._models import PathHierarchyTokenizerV2 # type: ignore - from ._models import Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema # type: ignore from ._models import PatternAnalyzer # type: ignore from ._models import PatternCaptureTokenFilter # type: ignore from ._models import PatternReplaceCharFilter # type: ignore @@ -275,6 +276,7 @@ from ._models import ShaperSkill # type: ignore from ._models import ShingleTokenFilter # type: ignore from ._models import Similarity # type: ignore + from ._models import SkillNames # type: ignore from ._models import SnowballTokenFilter # type: ignore from ._models import SoftDeleteColumnDeletionDetectionPolicy # type: ignore from ._models import SplitSkill # type: ignore @@ -371,6 +373,7 @@ 'DistanceScoringFunction', 'DistanceScoringParameters', 'DocumentExtractionSkill', + 'DocumentKeysOrIds', 'EdgeNGramTokenFilter', 'EdgeNGramTokenFilterV2', 'EdgeNGramTokenizer', @@ -423,7 +426,6 @@ 'OutputFieldMappingEntry', 'PIIDetectionSkill', 
'PathHierarchyTokenizerV2', - 'Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema', 'PatternAnalyzer', 'PatternCaptureTokenFilter', 'PatternReplaceCharFilter', @@ -466,6 +468,7 @@ 'ShaperSkill', 'ShingleTokenFilter', 'Similarity', + 'SkillNames', 'SnowballTokenFilter', 'SoftDeleteColumnDeletionDetectionPolicy', 'SplitSkill', diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py index a4a0654cb0cc..efb7f70d0e7d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py @@ -48,6 +48,8 @@ def __init__( self, **kwargs ): + """ + """ super(AnalyzedTokenInfo, self).__init__(**kwargs) self.token = None self.start_offset = None @@ -60,9 +62,9 @@ class AnalyzeRequest(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword text: Required. The text to break into tokens. - :paramtype text: str - :keyword analyzer: The name of the analyzer to use to break the given text. Possible values + :ivar text: Required. The text to break into tokens. + :vartype text: str + :ivar analyzer: The name of the analyzer to use to break the given text. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", @@ -80,20 +82,19 @@ class AnalyzeRequest(msrest.serialization.Model): "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". 
- :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values + :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar tokenizer: The name of the tokenizer to use to break the given text. Possible values include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". - :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible + :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :ivar normalizer: The name of the normalizer to use to normalize the given text. Possible values include: "asciifolding", "elision", "lowercase", "standard", "uppercase". - :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :keyword token_filters: An optional list of token filters to use when breaking the given text. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: An optional list of character filters to use when breaking the given - text. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :ivar token_filters: An optional list of token filters to use when breaking the given text. + :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :ivar char_filters: An optional list of character filters to use when breaking the given text. 
+ :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -113,6 +114,42 @@ def __init__( self, **kwargs ): + """ + :keyword text: Required. The text to break into tokens. + :paramtype text: str + :keyword analyzer: The name of the analyzer to use to break the given text. Possible values + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", + "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", + "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", + "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", + "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", + "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", + "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", + "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", + "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", + "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", + "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword tokenizer: The name of the tokenizer to use to break the given text. 
Possible values + include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", + "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", + "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". + :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible + values include: "asciifolding", "elision", "lowercase", "standard", "uppercase". + :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :keyword token_filters: An optional list of token filters to use when breaking the given text. + :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :keyword char_filters: An optional list of character filters to use when breaking the given + text. + :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + """ super(AnalyzeRequest, self).__init__(**kwargs) self.text = kwargs['text'] self.analyzer = kwargs.get('analyzer', None) @@ -127,9 +164,8 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword tokens: Required. The list of tokens returned by the analyzer specified in the - request. - :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] + :ivar tokens: Required. The list of tokens returned by the analyzer specified in the request. + :vartype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] """ _validation = { @@ -144,6 +180,11 @@ def __init__( self, **kwargs ): + """ + :keyword tokens: Required. The list of tokens returned by the analyzer specified in the + request. 
+ :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] + """ super(AnalyzeResult, self).__init__(**kwargs) self.tokens = kwargs['tokens'] @@ -156,13 +197,13 @@ class TokenFilter(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str + :vartype name: str """ _validation = { @@ -183,6 +224,12 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + """ super(TokenFilter, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = kwargs['name'] @@ -193,16 +240,16 @@ class AsciiFoldingTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. 
It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword preserve_original: A value indicating whether the original token will be kept. Default - is false. - :paramtype preserve_original: bool + :vartype name: str + :ivar preserve_original: A value indicating whether the original token will be kept. Default is + false. + :vartype preserve_original: bool """ _validation = { @@ -220,6 +267,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword preserve_original: A value indicating whether the original token will be kept. Default + is false. + :paramtype preserve_original: bool + """ super(AsciiFoldingTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' # type: str self.preserve_original = kwargs.get('preserve_original', False) @@ -230,12 +286,12 @@ class AzureActiveDirectoryApplicationCredentials(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword application_id: Required. An AAD Application ID that was granted the required access + :ivar application_id: Required. An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. - :paramtype application_id: str - :keyword application_secret: The authentication key of the specified AAD application. - :paramtype application_secret: str + :vartype application_id: str + :ivar application_secret: The authentication key of the specified AAD application. 
+ :vartype application_secret: str """ _validation = { @@ -251,6 +307,14 @@ def __init__( self, **kwargs ): + """ + :keyword application_id: Required. An AAD Application ID that was granted the required access + permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The + Application ID should not be confused with the Object ID for your AAD Application. + :paramtype application_id: str + :keyword application_secret: The authentication key of the specified AAD application. + :paramtype application_secret: str + """ super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs) self.application_id = kwargs['application_id'] self.application_secret = kwargs.get('application_secret', None) @@ -264,8 +328,8 @@ class Similarity(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Constant filled by server. - :paramtype odata_type: str + :ivar odata_type: Required. Constant filled by server. + :vartype odata_type: str """ _validation = { @@ -284,6 +348,8 @@ def __init__( self, **kwargs ): + """ + """ super(Similarity, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -293,16 +359,16 @@ class BM25Similarity(Similarity): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Constant filled by server. - :paramtype odata_type: str - :keyword k1: This property controls the scaling function between the term frequency of each + :ivar odata_type: Required. Constant filled by server. + :vartype odata_type: str + :ivar k1: This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. 
- :paramtype k1: float - :keyword b: This property controls how the length of a document affects the relevance score. By + :vartype k1: float + :ivar b: This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. - :paramtype b: float + :vartype b: float """ _validation = { @@ -319,6 +385,16 @@ def __init__( self, **kwargs ): + """ + :keyword k1: This property controls the scaling function between the term frequency of each + matching terms and the final relevance score of a document-query pair. By default, a value of + 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. + :paramtype k1: float + :keyword b: This property controls how the length of a document affects the relevance score. By + default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, + while a value of 1.0 means the score is fully normalized by the length of the document. + :paramtype b: float + """ super(BM25Similarity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' # type: str self.k1 = kwargs.get('k1', None) @@ -333,13 +409,13 @@ class CharFilter(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the char filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the char filter. 
It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str + :vartype name: str """ _validation = { @@ -360,6 +436,12 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + """ super(CharFilter, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = kwargs['name'] @@ -370,19 +452,19 @@ class CjkBigramTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword ignore_scripts: The scripts to ignore. - :paramtype ignore_scripts: list[str or + :vartype name: str + :ivar ignore_scripts: The scripts to ignore. + :vartype ignore_scripts: list[str or ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] - :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if + :ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. 
- :paramtype output_unigrams: bool + :vartype output_unigrams: bool """ _validation = { @@ -401,6 +483,18 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword ignore_scripts: The scripts to ignore. + :paramtype ignore_scripts: list[str or + ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] + :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if + true), or just bigrams (if false). Default is false. + :paramtype output_unigrams: bool + """ super(CjkBigramTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' # type: str self.ignore_scripts = kwargs.get('ignore_scripts', None) @@ -412,8 +506,8 @@ class ClassicSimilarity(Similarity): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Constant filled by server. - :paramtype odata_type: str + :ivar odata_type: Required. Constant filled by server. + :vartype odata_type: str """ _validation = { @@ -428,6 +522,8 @@ def __init__( self, **kwargs ): + """ + """ super(ClassicSimilarity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' # type: str @@ -440,13 +536,13 @@ class LexicalTokenizer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. 
It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str """ _validation = { @@ -467,6 +563,12 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + """ super(LexicalTokenizer, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = kwargs['name'] @@ -477,16 +579,16 @@ class ClassicTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. 
The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -505,6 +607,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + """ super(ClassicTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' # type: str self.max_token_length = kwargs.get('max_token_length', 255) @@ -518,11 +629,11 @@ class CognitiveServicesAccount(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource + :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource attached to a skillset.Constant filled by server. - :paramtype odata_type: str - :keyword description: Description of the cognitive service resource attached to a skillset. - :paramtype description: str + :vartype odata_type: str + :ivar description: Description of the cognitive service resource attached to a skillset. + :vartype description: str """ _validation = { @@ -542,6 +653,10 @@ def __init__( self, **kwargs ): + """ + :keyword description: Description of the cognitive service resource attached to a skillset. 
+ :paramtype description: str + """ super(CognitiveServicesAccount, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.description = kwargs.get('description', None) @@ -552,14 +667,14 @@ class CognitiveServicesAccountKey(CognitiveServicesAccount): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource + :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource attached to a skillset.Constant filled by server. - :paramtype odata_type: str - :keyword description: Description of the cognitive service resource attached to a skillset. - :paramtype description: str - :keyword key: Required. The key used to provision the cognitive service resource attached to a + :vartype odata_type: str + :ivar description: Description of the cognitive service resource attached to a skillset. + :vartype description: str + :ivar key: Required. The key used to provision the cognitive service resource attached to a skillset. - :paramtype key: str + :vartype key: str """ _validation = { @@ -577,6 +692,13 @@ def __init__( self, **kwargs ): + """ + :keyword description: Description of the cognitive service resource attached to a skillset. + :paramtype description: str + :keyword key: Required. The key used to provision the cognitive service resource attached to a + skillset. + :paramtype key: str + """ super(CognitiveServicesAccountKey, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' # type: str self.key = kwargs['key'] @@ -587,22 +709,22 @@ class CommonGramTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. 
It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword common_words: Required. The set of common words. - :paramtype common_words: list[str] - :keyword ignore_case: A value indicating whether common words matching will be case - insensitive. Default is false. - :paramtype ignore_case: bool - :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When - in query mode, the token filter generates bigrams and then removes common words and single - terms followed by a common word. Default is false. - :paramtype use_query_mode: bool + :vartype name: str + :ivar common_words: Required. The set of common words. + :vartype common_words: list[str] + :ivar ignore_case: A value indicating whether common words matching will be case insensitive. + Default is false. + :vartype ignore_case: bool + :ivar use_query_mode: A value that indicates whether the token filter is in query mode. When in + query mode, the token filter generates bigrams and then removes common words and single terms + followed by a common word. Default is false. + :vartype use_query_mode: bool """ _validation = { @@ -623,6 +745,21 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword common_words: Required. The set of common words. + :paramtype common_words: list[str] + :keyword ignore_case: A value indicating whether common words matching will be case + insensitive. Default is false. 
+ :paramtype ignore_case: bool + :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When + in query mode, the token filter generates bigrams and then removes common words and single + terms followed by a common word. Default is false. + :paramtype use_query_mode: bool + """ super(CommonGramTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' # type: str self.common_words = kwargs['common_words'] @@ -638,26 +775,25 @@ class SearchIndexerSkill(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. 
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] """ _validation = { @@ -683,6 +819,25 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. 
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + """ super(SearchIndexerSkill, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = kwargs.get('name', None) @@ -697,26 +852,25 @@ class ConditionalSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. 
+ :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] """ _validation = { @@ -738,6 +892,25 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. 
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + """ super(ConditionalSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' # type: str @@ -747,14 +920,14 @@ class CorsOptions(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword allowed_origins: Required. The list of origins from which JavaScript code will be - granted access to your index. Can contain a list of hosts of the form + :ivar allowed_origins: Required. The list of origins from which JavaScript code will be granted + access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). - :paramtype allowed_origins: list[str] - :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight + :vartype allowed_origins: list[str] + :ivar max_age_in_seconds: The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. - :paramtype max_age_in_seconds: long + :vartype max_age_in_seconds: long """ _validation = { @@ -770,6 +943,16 @@ def __init__( self, **kwargs ): + """ + :keyword allowed_origins: Required. The list of origins from which JavaScript code will be + granted access to your index. Can contain a list of hosts of the form + {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not + recommended). + :paramtype allowed_origins: list[str] + :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight + responses. Defaults to 5 minutes. 
+ :paramtype max_age_in_seconds: long + """ super(CorsOptions, self).__init__(**kwargs) self.allowed_origins = kwargs['allowed_origins'] self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) @@ -783,13 +966,13 @@ class LexicalAnalyzer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str """ _validation = { @@ -810,6 +993,12 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + """ super(LexicalAnalyzer, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = kwargs['name'] @@ -820,27 +1009,27 @@ class CustomAnalyzer(LexicalAnalyzer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. 
It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar tokenizer: Required. The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. Possible values include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". - :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :keyword token_filters: A list of token filters used to filter out or modify the tokens - generated by a tokenizer. For example, you can specify a lowercase filter that converts all - characters to lowercase. The filters are run in the order in which they are listed. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: A list of character filters used to prepare input text before it is + :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :ivar token_filters: A list of token filters used to filter out or modify the tokens generated + by a tokenizer. For example, you can specify a lowercase filter that converts all characters to + lowercase. The filters are run in the order in which they are listed. 
+ :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :ivar char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -861,6 +1050,26 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a + sequence of tokens, such as breaking a sentence into words. Possible values include: "classic", + "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", + "standard_v2", "uax_url_email", "whitespace". + :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :keyword token_filters: A list of token filters used to filter out or modify the tokens + generated by a tokenizer. For example, you can specify a lowercase filter that converts all + characters to lowercase. The filters are run in the order in which they are listed. + :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :keyword char_filters: A list of character filters used to prepare input text before it is + processed by the tokenizer. For instance, they can replace certain characters or symbols. The + filters are run in the order in which they are listed. 
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + """ super(CustomAnalyzer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' # type: str self.tokenizer = kwargs['tokenizer'] @@ -873,51 +1082,51 @@ class CustomEntity(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be + :ivar name: Required. The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. - :paramtype name: str - :keyword description: This field can be used as a passthrough for custom metadata about the + :vartype name: str + :ivar description: This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. - :paramtype description: str - :keyword type: This field can be used as a passthrough for custom metadata about the matched + :vartype description: str + :ivar type: This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. - :paramtype type: str - :keyword subtype: This field can be used as a passthrough for custom metadata about the matched + :vartype type: str + :ivar subtype: This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. - :paramtype subtype: str - :keyword id: This field can be used as a passthrough for custom metadata about the matched + :vartype subtype: str + :ivar id: This field can be used as a passthrough for custom metadata about the matched text(s). 
The value of this field will appear with every match of its entity in the skill output. - :paramtype id: str - :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + :vartype id: str + :ivar case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of "Microsoft" could be: microsoft, microSoft, MICROSOFT. - :paramtype case_sensitive: bool - :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with - the entity name should be sensitive to accent. - :paramtype accent_sensitive: bool - :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number - of divergent characters that would still constitute a match with the entity name. The smallest + :vartype case_sensitive: bool + :ivar accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + entity name should be sensitive to accent. + :vartype accent_sensitive: bool + :ivar fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number of + divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. - :paramtype fuzzy_edit_distance: int - :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It - be used to change the default value of all aliases caseSensitive values. - :paramtype default_case_sensitive: bool - :keyword default_accent_sensitive: Changes the default accent sensitivity value for this - entity. It be used to change the default value of all aliases accentSensitive values. 
- :paramtype default_accent_sensitive: bool - :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this + :vartype fuzzy_edit_distance: int + :ivar default_case_sensitive: Changes the default case sensitivity value for this entity. It be + used to change the default value of all aliases caseSensitive values. + :vartype default_case_sensitive: bool + :ivar default_accent_sensitive: Changes the default accent sensitivity value for this entity. + It be used to change the default value of all aliases accentSensitive values. + :vartype default_accent_sensitive: bool + :ivar default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. - :paramtype default_fuzzy_edit_distance: int - :keyword aliases: An array of complex objects that can be used to specify alternative spellings - or synonyms to the root entity name. - :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] + :vartype default_fuzzy_edit_distance: int + :ivar aliases: An array of complex objects that can be used to specify alternative spellings or + synonyms to the root entity name. + :vartype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] """ _validation = { @@ -943,6 +1152,53 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be + grouped by this name, and it should represent the "normalized" form of the text being found. + :paramtype name: str + :keyword description: This field can be used as a passthrough for custom metadata about the + matched text(s). The value of this field will appear with every match of its entity in the + skill output. + :paramtype description: str + :keyword type: This field can be used as a passthrough for custom metadata about the matched + text(s). 
The value of this field will appear with every match of its entity in the skill + output. + :paramtype type: str + :keyword subtype: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in the skill + output. + :paramtype subtype: str + :keyword id: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in the skill + output. + :paramtype id: str + :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + entity name should be sensitive to character casing. Sample case insensitive matches of + "Microsoft" could be: microsoft, microSoft, MICROSOFT. + :paramtype case_sensitive: bool + :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with + the entity name should be sensitive to accent. + :paramtype accent_sensitive: bool + :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number + of divergent characters that would still constitute a match with the entity name. The smallest + possible fuzziness for any given match is returned. For instance, if the edit distance is set + to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case + sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but + otherwise do. + :paramtype fuzzy_edit_distance: int + :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It + be used to change the default value of all aliases caseSensitive values. + :paramtype default_case_sensitive: bool + :keyword default_accent_sensitive: Changes the default accent sensitivity value for this + entity. It be used to change the default value of all aliases accentSensitive values. 
+ :paramtype default_accent_sensitive: bool + :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this + entity. It can be used to change the default value of all aliases fuzzyEditDistance values. + :paramtype default_fuzzy_edit_distance: int + :keyword aliases: An array of complex objects that can be used to specify alternative spellings + or synonyms to the root entity name. + :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] + """ super(CustomEntity, self).__init__(**kwargs) self.name = kwargs['name'] self.description = kwargs.get('description', None) @@ -963,14 +1219,14 @@ class CustomEntityAlias(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword text: Required. The text of the alias. - :paramtype text: str - :keyword case_sensitive: Determine if the alias is case sensitive. - :paramtype case_sensitive: bool - :keyword accent_sensitive: Determine if the alias is accent sensitive. - :paramtype accent_sensitive: bool - :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. - :paramtype fuzzy_edit_distance: int + :ivar text: Required. The text of the alias. + :vartype text: str + :ivar case_sensitive: Determine if the alias is case sensitive. + :vartype case_sensitive: bool + :ivar accent_sensitive: Determine if the alias is accent sensitive. + :vartype accent_sensitive: bool + :ivar fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. + :vartype fuzzy_edit_distance: int """ _validation = { @@ -988,6 +1244,16 @@ def __init__( self, **kwargs ): + """ + :keyword text: Required. The text of the alias. + :paramtype text: str + :keyword case_sensitive: Determine if the alias is case sensitive. + :paramtype case_sensitive: bool + :keyword accent_sensitive: Determine if the alias is accent sensitive. 
+ :paramtype accent_sensitive: bool + :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. + :paramtype fuzzy_edit_distance: int + """ super(CustomEntityAlias, self).__init__(**kwargs) self.text = kwargs['text'] self.case_sensitive = kwargs.get('case_sensitive', None) @@ -1000,47 +1266,45 @@ class CustomEntityLookupSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. 
+ :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage - :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to + :ivar entities_definition_uri: Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. - :paramtype entities_definition_uri: str - :keyword inline_entities_definition: The inline CustomEntity definition. - :paramtype inline_entities_definition: - list[~azure.search.documents.indexes.models.CustomEntity] - :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is + :vartype entities_definition_uri: str + :ivar inline_entities_definition: The inline CustomEntity definition. 
+ :vartype inline_entities_definition: list[~azure.search.documents.indexes.models.CustomEntity] + :ivar global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is not + set in CustomEntity, this value will be the default value. + :vartype global_default_case_sensitive: bool + :ivar global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. - :paramtype global_default_case_sensitive: bool - :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive - is not set in CustomEntity, this value will be the default value. - :paramtype global_default_accent_sensitive: bool - :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If + :vartype global_default_accent_sensitive: bool + :ivar global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. - :paramtype global_default_fuzzy_edit_distance: int + :vartype global_default_fuzzy_edit_distance: int """ _validation = { @@ -1068,6 +1332,46 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. 
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage + :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to + match against. This entity definition is read at the beginning of an indexer run. Any updates + to this file during an indexer run will not take effect until subsequent runs. This config must + be accessible over HTTPS. + :paramtype entities_definition_uri: str + :keyword inline_entities_definition: The inline CustomEntity definition. + :paramtype inline_entities_definition: + list[~azure.search.documents.indexes.models.CustomEntity] + :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is + not set in CustomEntity, this value will be the default value. + :paramtype global_default_case_sensitive: bool + :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive + is not set in CustomEntity, this value will be the default value. + :paramtype global_default_accent_sensitive: bool + :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If + FuzzyEditDistance is not set in CustomEntity, this value will be the default value. 
+ :paramtype global_default_fuzzy_edit_distance: int + """ super(CustomEntityLookupSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.CustomEntityLookupSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -1083,13 +1387,13 @@ class LexicalNormalizer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the normalizer. - :paramtype odata_type: str - :keyword name: Required. The name of the normalizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named - 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. - :paramtype name: str + :ivar odata_type: Required. Identifies the concrete type of the normalizer. + :vartype odata_type: str + :ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', + 'standard', 'lowercase', 'uppercase', or 'elision'. + :vartype name: str """ _validation = { @@ -1106,6 +1410,15 @@ def __init__( self, **kwargs ): + """ + :keyword odata_type: Required. Identifies the concrete type of the normalizer. + :paramtype odata_type: str + :keyword name: Required. The name of the normalizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named + 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. 
+ :paramtype name: str + """ super(LexicalNormalizer, self).__init__(**kwargs) self.odata_type = kwargs['odata_type'] self.name = kwargs['name'] @@ -1116,21 +1429,21 @@ class CustomNormalizer(LexicalNormalizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the normalizer. - :paramtype odata_type: str - :keyword name: Required. The name of the normalizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named - 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. - :paramtype name: str - :keyword token_filters: A list of token filters used to filter out or modify the input token. - For example, you can specify a lowercase filter that converts all characters to lowercase. The + :ivar odata_type: Required. Identifies the concrete type of the normalizer. + :vartype odata_type: str + :ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', + 'standard', 'lowercase', 'uppercase', or 'elision'. + :vartype name: str + :ivar token_filters: A list of token filters used to filter out or modify the input token. For + example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. 
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: A list of character filters used to prepare input text before it is + :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :ivar char_filters: A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -1149,6 +1462,23 @@ def __init__( self, **kwargs ): + """ + :keyword odata_type: Required. Identifies the concrete type of the normalizer. + :paramtype odata_type: str + :keyword name: Required. The name of the normalizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named + 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. + :paramtype name: str + :keyword token_filters: A list of token filters used to filter out or modify the input token. + For example, you can specify a lowercase filter that converts all characters to lowercase. The + filters are run in the order in which they are listed. + :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :keyword char_filters: A list of character filters used to prepare input text before it is + processed. For instance, they can replace certain characters or symbols. The filters are run in + the order in which they are listed. 
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + """ super(CustomNormalizer, self).__init__(**kwargs) self.token_filters = kwargs.get('token_filters', None) self.char_filters = kwargs.get('char_filters', None) @@ -1162,9 +1492,9 @@ class DataChangeDetectionPolicy(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data change detection + :ivar odata_type: Required. Identifies the concrete type of the data change detection policy.Constant filled by server. - :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -1183,6 +1513,8 @@ def __init__( self, **kwargs ): + """ + """ super(DataChangeDetectionPolicy, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -1195,9 +1527,9 @@ class DataDeletionDetectionPolicy(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data deletion detection + :ivar odata_type: Required. Identifies the concrete type of the data deletion detection policy.Constant filled by server. - :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -1216,6 +1548,8 @@ def __init__( self, **kwargs ): + """ + """ super(DataDeletionDetectionPolicy, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -1223,9 +1557,9 @@ def __init__( class DataSourceCredentials(msrest.serialization.Model): """Represents credentials that can be used to connect to a datasource. - :keyword connection_string: The connection string for the datasource. Set to - ':code:``' if you do not want the connection string updated. - :paramtype connection_string: str + :ivar connection_string: The connection string for the datasource. Set to ':code:``' + if you do not want the connection string updated. 
+ :vartype connection_string: str """ _attribute_map = { @@ -1236,6 +1570,11 @@ def __init__( self, **kwargs ): + """ + :keyword connection_string: The connection string for the datasource. Set to + ':code:``' if you do not want the connection string updated. + :paramtype connection_string: str + """ super(DataSourceCredentials, self).__init__(**kwargs) self.connection_string = kwargs.get('connection_string', None) @@ -1245,11 +1584,11 @@ class DefaultCognitiveServicesAccount(CognitiveServicesAccount): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource + :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource attached to a skillset.Constant filled by server. - :paramtype odata_type: str - :keyword description: Description of the cognitive service resource attached to a skillset. - :paramtype description: str + :vartype odata_type: str + :ivar description: Description of the cognitive service resource attached to a skillset. + :vartype description: str """ _validation = { @@ -1265,6 +1604,10 @@ def __init__( self, **kwargs ): + """ + :keyword description: Description of the cognitive service resource attached to a skillset. + :paramtype description: str + """ super(DefaultCognitiveServicesAccount, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' # type: str @@ -1274,27 +1617,27 @@ class DictionaryDecompounderTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. 
+ :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword word_list: Required. The list of words to match against. - :paramtype word_list: list[str] - :keyword min_word_size: The minimum word size. Only words longer than this get processed. - Default is 5. Maximum is 300. - :paramtype min_word_size: int - :keyword min_subword_size: The minimum subword size. Only subwords longer than this are - outputted. Default is 2. Maximum is 300. - :paramtype min_subword_size: int - :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are + :vartype name: str + :ivar word_list: Required. The list of words to match against. + :vartype word_list: list[str] + :ivar min_word_size: The minimum word size. Only words longer than this get processed. Default + is 5. Maximum is 300. + :vartype min_word_size: int + :ivar min_subword_size: The minimum subword size. Only subwords longer than this are outputted. + Default is 2. Maximum is 300. + :vartype min_subword_size: int + :ivar max_subword_size: The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. - :paramtype max_subword_size: int - :keyword only_longest_match: A value indicating whether to add only the longest matching - subword to the output. Default is false. - :paramtype only_longest_match: bool + :vartype max_subword_size: int + :ivar only_longest_match: A value indicating whether to add only the longest matching subword + to the output. Default is false. + :vartype only_longest_match: bool """ _validation = { @@ -1320,6 +1663,26 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. 
It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword word_list: Required. The list of words to match against. + :paramtype word_list: list[str] + :keyword min_word_size: The minimum word size. Only words longer than this get processed. + Default is 5. Maximum is 300. + :paramtype min_word_size: int + :keyword min_subword_size: The minimum subword size. Only subwords longer than this are + outputted. Default is 2. Maximum is 300. + :paramtype min_subword_size: int + :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are + outputted. Default is 15. Maximum is 300. + :paramtype max_subword_size: int + :keyword only_longest_match: A value indicating whether to add only the longest matching + subword to the output. Default is false. + :paramtype only_longest_match: bool + """ super(DictionaryDecompounderTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' # type: str self.word_list = kwargs['word_list'] @@ -1337,18 +1700,18 @@ class ScoringFunction(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. 
- :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation """ @@ -1373,6 +1736,18 @@ def __init__( self, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + """ super(ScoringFunction, self).__init__(**kwargs) self.type = None # type: Optional[str] self.field_name = kwargs['field_name'] @@ -1385,21 +1760,21 @@ class DistanceScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. 
The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the distance scoring function. - :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters + :ivar parameters: Required. Parameter values for the distance scoring function. + :vartype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters """ _validation = { @@ -1421,6 +1796,20 @@ def __init__( self, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the distance scoring function. 
+ :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters + """ super(DistanceScoringFunction, self).__init__(**kwargs) self.type = 'distance' # type: str self.parameters = kwargs['parameters'] @@ -1431,12 +1820,12 @@ class DistanceScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword reference_point_parameter: Required. The name of the parameter passed in search - queries to specify the reference location. - :paramtype reference_point_parameter: str - :keyword boosting_distance: Required. The distance in kilometers from the reference location - where the boosting range ends. - :paramtype boosting_distance: float + :ivar reference_point_parameter: Required. The name of the parameter passed in search queries + to specify the reference location. + :vartype reference_point_parameter: str + :ivar boosting_distance: Required. The distance in kilometers from the reference location where + the boosting range ends. + :vartype boosting_distance: float """ _validation = { @@ -1453,6 +1842,14 @@ def __init__( self, **kwargs ): + """ + :keyword reference_point_parameter: Required. The name of the parameter passed in search + queries to specify the reference location. + :paramtype reference_point_parameter: str + :keyword boosting_distance: Required. The distance in kilometers from the reference location + where the boosting range ends. + :paramtype boosting_distance: float + """ super(DistanceScoringParameters, self).__init__(**kwargs) self.reference_point_parameter = kwargs['reference_point_parameter'] self.boosting_distance = kwargs['boosting_distance'] @@ -1463,33 +1860,32 @@ class DocumentExtractionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. 
Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. - :paramtype parsing_mode: str - :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. 
Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. + :vartype parsing_mode: str + :ivar data_to_extract: The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. - :paramtype data_to_extract: str - :keyword configuration: A dictionary of configurations for the skill. - :paramtype configuration: dict[str, any] + :vartype data_to_extract: str + :ivar configuration: A dictionary of configurations for the skill. + :vartype configuration: dict[str, any] """ _validation = { @@ -1514,6 +1910,32 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. 
The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. + :paramtype parsing_mode: str + :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to + 'contentAndMetadata' if not defined. + :paramtype data_to_extract: str + :keyword configuration: A dictionary of configurations for the skill. + :paramtype configuration: dict[str, any] + """ super(DocumentExtractionSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Util.DocumentExtractionSkill' # type: str self.parsing_mode = kwargs.get('parsing_mode', None) @@ -1521,26 +1943,55 @@ def __init__( self.configuration = kwargs.get('configuration', None) +class DocumentKeysOrIds(msrest.serialization.Model): + """DocumentKeysOrIds. + + :ivar document_keys: document keys to be reset. + :vartype document_keys: list[str] + :ivar datasource_document_ids: datasource document identifiers to be reset. + :vartype datasource_document_ids: list[str] + """ + + _attribute_map = { + 'document_keys': {'key': 'documentKeys', 'type': '[str]'}, + 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword document_keys: document keys to be reset. + :paramtype document_keys: list[str] + :keyword datasource_document_ids: datasource document identifiers to be reset. + :paramtype datasource_document_ids: list[str] + """ + super(DocumentKeysOrIds, self).__init__(**kwargs) + self.document_keys = kwargs.get('document_keys', None) + self.datasource_document_ids = kwargs.get('datasource_document_ids', None) + + class EdgeNGramTokenFilter(TokenFilter): """Generates n-grams of the given size(s) starting from the front or the back of an input token. 
This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. - :paramtype max_gram: int - :keyword side: Specifies which side of the input the n-gram should be generated from. Default - is "front". Possible values include: "front", "back". - :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. + :vartype max_gram: int + :ivar side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: "front", "back". + :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1560,6 +2011,20 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. + :paramtype max_gram: int + :keyword side: Specifies which side of the input the n-gram should be generated from. Default + is "front". Possible values include: "front", "back". + :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + """ super(EdgeNGramTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' # type: str self.min_gram = kwargs.get('min_gram', 1) @@ -1572,21 +2037,21 @@ class EdgeNGramTokenFilterV2(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword side: Specifies which side of the input the n-gram should be generated from. Default - is "front". Possible values include: "front", "back". - :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. 
Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: "front", "back". + :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1608,6 +2073,20 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + :keyword side: Specifies which side of the input the n-gram should be generated from. Default + is "front". Possible values include: "front", "back". + :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + """ super(EdgeNGramTokenFilterV2, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' # type: str self.min_gram = kwargs.get('min_gram', 1) @@ -1620,20 +2099,20 @@ class EdgeNGramTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. 
- :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword token_chars: Character classes to keep in the tokens. - :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar token_chars: Character classes to keep in the tokens. + :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] """ _validation = { @@ -1655,6 +2134,19 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + :keyword token_chars: Character classes to keep in the tokens. 
+ :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + """ super(EdgeNGramTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' # type: str self.min_gram = kwargs.get('min_gram', 1) @@ -1667,15 +2159,15 @@ class ElisionTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword articles: The set of articles to remove. - :paramtype articles: list[str] + :vartype name: str + :ivar articles: The set of articles to remove. + :vartype articles: list[str] """ _validation = { @@ -1693,6 +2185,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword articles: The set of articles to remove. + :paramtype articles: list[str] + """ super(ElisionTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' # type: str self.articles = kwargs.get('articles', None) @@ -1703,36 +2203,35 @@ class EntityLinkingSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. 
Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. 
+ :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. - :paramtype minimum_precision: float - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype minimum_precision: float + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -1758,6 +2257,35 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. 
+ :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + :paramtype default_language_code: str + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(EntityLinkingSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.V3.EntityLinkingSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -1770,42 +2298,41 @@ class EntityRecognitionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. 
- :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword categories: A list of entity categories that should be extracted. - :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. 
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar categories: A list of entity categories that should be extracted. + :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage - :keyword include_typeless_entities: Determines whether or not to include entities which are - well known but don't conform to a pre-defined type. If this configuration is not set (default), - set to null or set to false, entities which don't conform to one of the pre-defined types will - not be surfaced. - :paramtype include_typeless_entities: bool - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :ivar include_typeless_entities: Determines whether or not to include entities which are well + known but don't conform to a pre-defined type. If this configuration is not set (default), set + to null or set to false, entities which don't conform to one of the pre-defined types will not + be surfaced. + :vartype include_typeless_entities: bool + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. 
- :paramtype minimum_precision: float + :vartype minimum_precision: float """ _validation = { @@ -1831,6 +2358,41 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword categories: A list of entity categories that should be extracted. + :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", + "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage + :keyword include_typeless_entities: Determines whether or not to include entities which are + well known but don't conform to a pre-defined type. 
If this configuration is not set (default), + set to null or set to false, entities which don't conform to one of the pre-defined types will + not be surfaced. + :paramtype include_typeless_entities: bool + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + """ super(EntityRecognitionSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' # type: str self.categories = kwargs.get('categories', None) @@ -1844,38 +2406,37 @@ class EntityRecognitionSkillV3(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. 
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword categories: A list of entity categories that should be extracted. - :paramtype categories: list[str] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar categories: A list of entity categories that should be extracted. + :vartype categories: list[str] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. 
- :paramtype minimum_precision: float - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype minimum_precision: float + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -1902,6 +2463,37 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword categories: A list of entity categories that should be extracted. + :paramtype categories: list[str] + :keyword default_language_code: A value indicating which language code to use. Default is en. 
+ :paramtype default_language_code: str + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(EntityRecognitionSkillV3, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.V3.EntityRecognitionSkill' # type: str self.categories = kwargs.get('categories', None) @@ -1915,13 +2507,13 @@ class FieldMapping(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword source_field_name: Required. The name of the field in the data source. - :paramtype source_field_name: str - :keyword target_field_name: The name of the target field in the index. Same as the source field + :ivar source_field_name: Required. The name of the field in the data source. + :vartype source_field_name: str + :ivar target_field_name: The name of the target field in the index. Same as the source field name by default. - :paramtype target_field_name: str - :keyword mapping_function: A function to apply to each source field value before indexing. - :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction + :vartype target_field_name: str + :ivar mapping_function: A function to apply to each source field value before indexing. + :vartype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction """ _validation = { @@ -1938,6 +2530,15 @@ def __init__( self, **kwargs ): + """ + :keyword source_field_name: Required. The name of the field in the data source. 
+ :paramtype source_field_name: str + :keyword target_field_name: The name of the target field in the index. Same as the source field + name by default. + :paramtype target_field_name: str + :keyword mapping_function: A function to apply to each source field value before indexing. + :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction + """ super(FieldMapping, self).__init__(**kwargs) self.source_field_name = kwargs['source_field_name'] self.target_field_name = kwargs.get('target_field_name', None) @@ -1949,11 +2550,11 @@ class FieldMappingFunction(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the field mapping function. - :paramtype name: str - :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each + :ivar name: Required. The name of the field mapping function. + :vartype name: str + :ivar parameters: A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. - :paramtype parameters: dict[str, any] + :vartype parameters: dict[str, any] """ _validation = { @@ -1969,6 +2570,13 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the field mapping function. + :paramtype name: str + :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each + value must be of a primitive type. + :paramtype parameters: dict[str, any] + """ super(FieldMappingFunction, self).__init__(**kwargs) self.name = kwargs['name'] self.parameters = kwargs.get('parameters', None) @@ -1979,21 +2587,21 @@ class FreshnessScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. 
Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the freshness scoring function. - :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters + :ivar parameters: Required. Parameter values for the freshness scoring function. + :vartype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters """ _validation = { @@ -2015,6 +2623,20 @@ def __init__( self, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". 
+ :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the freshness scoring function. + :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters + """ super(FreshnessScoringFunction, self).__init__(**kwargs) self.type = 'freshness' # type: str self.parameters = kwargs['parameters'] @@ -2025,9 +2647,9 @@ class FreshnessScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword boosting_duration: Required. The expiration period after which boosting will stop for - a particular document. - :paramtype boosting_duration: ~datetime.timedelta + :ivar boosting_duration: Required. The expiration period after which boosting will stop for a + particular document. + :vartype boosting_duration: ~datetime.timedelta """ _validation = { @@ -2042,6 +2664,11 @@ def __init__( self, **kwargs ): + """ + :keyword boosting_duration: Required. The expiration period after which boosting will stop for + a particular document. + :paramtype boosting_duration: ~datetime.timedelta + """ super(FreshnessScoringParameters, self).__init__(**kwargs) self.boosting_duration = kwargs['boosting_duration'] @@ -2073,6 +2700,8 @@ def __init__( self, **kwargs ): + """ + """ super(GetIndexStatisticsResult, self).__init__(**kwargs) self.document_count = None self.storage_size = None @@ -2083,11 +2712,11 @@ class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data change detection + :ivar odata_type: Required. Identifies the concrete type of the data change detection policy.Constant filled by server. - :paramtype odata_type: str - :keyword high_water_mark_column_name: Required. The name of the high water mark column. 
- :paramtype high_water_mark_column_name: str + :vartype odata_type: str + :ivar high_water_mark_column_name: Required. The name of the high water mark column. + :vartype high_water_mark_column_name: str """ _validation = { @@ -2104,6 +2733,10 @@ def __init__( self, **kwargs ): + """ + :keyword high_water_mark_column_name: Required. The name of the high water mark column. + :paramtype high_water_mark_column_name: str + """ super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' # type: str self.high_water_mark_column_name = kwargs['high_water_mark_column_name'] @@ -2114,34 +2747,33 @@ class ImageAnalysisSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. 
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "en", "es", "ja", "pt", "zh". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage - :keyword visual_features: A list of visual features. - :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] - :keyword details: A string indicating which domain-specific details to return. - :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] + :ivar visual_features: A list of visual features. 
+ :vartype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] + :ivar details: A string indicating which domain-specific details to return. + :vartype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] """ _validation = { @@ -2166,6 +2798,33 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "en", "es", "ja", "pt", "zh". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage + :keyword visual_features: A list of visual features. + :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] + :keyword details: A string indicating which domain-specific details to return. 
+ :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] + """ super(ImageAnalysisSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -2227,6 +2886,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexerCurrentState, self).__init__(**kwargs) self.mode = None self.all_docs_initial_change_tracking_state = None @@ -2311,6 +2972,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexerExecutionResult, self).__init__(**kwargs) self.status = None self.status_detail = None @@ -2329,19 +2992,18 @@ def __init__( class IndexingParameters(msrest.serialization.Model): """Represents parameters for indexer execution. - :keyword batch_size: The number of items that are read from the data source and indexed as a + :ivar batch_size: The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. - :paramtype batch_size: int - :keyword max_failed_items: The maximum number of items that can fail indexing for indexer + :vartype batch_size: int + :ivar max_failed_items: The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. - :paramtype max_failed_items: int - :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can - fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. - :paramtype max_failed_items_per_batch: int - :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is + :vartype max_failed_items: int + :ivar max_failed_items_per_batch: The maximum number of items in a single batch that can fail + indexing for the batch to still be considered successful. -1 means no limit. Default is 0. 
+ :vartype max_failed_items_per_batch: int + :ivar configuration: A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. - :paramtype configuration: - ~azure.search.documents.indexes.models.IndexingParametersConfiguration + :vartype configuration: ~azure.search.documents.indexes.models.IndexingParametersConfiguration """ _attribute_map = { @@ -2355,6 +3017,21 @@ def __init__( self, **kwargs ): + """ + :keyword batch_size: The number of items that are read from the data source and indexed as a + single batch in order to improve performance. The default depends on the data source type. + :paramtype batch_size: int + :keyword max_failed_items: The maximum number of items that can fail indexing for indexer + execution to still be considered successful. -1 means no limit. Default is 0. + :paramtype max_failed_items: int + :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can + fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. + :paramtype max_failed_items_per_batch: int + :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is + the name of a specific property. Each value must be of a primitive type. + :paramtype configuration: + ~azure.search.documents.indexes.models.IndexingParametersConfiguration + """ super(IndexingParameters, self).__init__(**kwargs) self.batch_size = kwargs.get('batch_size', None) self.max_failed_items = kwargs.get('max_failed_items', 0) @@ -2365,73 +3042,73 @@ def __init__( class IndexingParametersConfiguration(msrest.serialization.Model): """A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. 
- :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] - :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. + :vartype additional_properties: dict[str, any] + :ivar parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines". Default value: "default". - :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode - :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore - when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip - over those files during indexing. - :paramtype excluded_file_name_extensions: str - :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select - when processing from Azure blob storage. For example, you could focus indexing on specific + :vartype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode + :ivar excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore when + processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over + those files during indexing. + :vartype excluded_file_name_extensions: str + :ivar indexed_file_name_extensions: Comma-delimited list of filename extensions to select when + processing from Azure blob storage. For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to specifically include those file types. 
- :paramtype indexed_file_name_extensions: str - :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to - continue indexing when an unsupported content type is encountered, and you don't know all the - content types (file extensions) in advance. - :paramtype fail_on_unsupported_content_type: bool - :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue + :vartype indexed_file_name_extensions: str + :ivar fail_on_unsupported_content_type: For Azure blobs, set to false if you want to continue + indexing when an unsupported content type is encountered, and you don't know all the content + types (file extensions) in advance. + :vartype fail_on_unsupported_content_type: bool + :ivar fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue indexing if a document fails indexing. - :paramtype fail_on_unprocessable_document: bool - :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this - property to true to still index storage metadata for blob content that is too large to process. + :vartype fail_on_unprocessable_document: bool + :ivar index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this property + to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. - :paramtype index_storage_metadata_only_for_oversized_documents: bool - :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column + :vartype index_storage_metadata_only_for_oversized_documents: bool + :ivar delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. 
- :paramtype delimited_text_headers: str - :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character + :vartype delimited_text_headers: str + :ivar delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, "|"). - :paramtype delimited_text_delimiter: str - :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line - of each blob contains headers. - :paramtype first_line_contains_headers: bool - :keyword document_root: For JSON arrays, given a structured or semi-structured document, you - can specify a path to the array using this property. - :paramtype document_root: str - :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the + :vartype delimited_text_delimiter: str + :ivar first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line of + each blob contains headers. + :vartype first_line_contains_headers: bool + :ivar document_root: For JSON arrays, given a structured or semi-structured document, you can + specify a path to the array using this property. + :vartype document_root: str + :ivar data_to_extract: Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata", "allMetadata", "contentAndMetadata". Default value: "contentAndMetadata". 
- :paramtype data_to_extract: str or + :vartype data_to_extract: str or ~azure.search.documents.indexes.models.BlobIndexerDataToExtract - :keyword image_action: Determines how to process embedded images and image files in Azure blob + :ivar image_action: Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. Possible values include: "none", "generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none". - :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction - :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data - that is an object representing the original file data downloaded from your blob data source. - This allows you to pass the original file data to a custom skill for processing within the + :vartype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction + :ivar allow_skillset_to_read_file_data: If true, will create a path //document//file_data that + is an object representing the original file data downloaded from your blob data source. This + allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. - :paramtype allow_skillset_to_read_file_data: bool - :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files - in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none". - :paramtype pdf_text_rotation_algorithm: str or + :vartype allow_skillset_to_read_file_data: bool + :ivar pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files in + Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none". 
+ :vartype pdf_text_rotation_algorithm: str or ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm - :keyword execution_environment: Specifies the environment in which the indexer should execute. + :ivar execution_environment: Specifies the environment in which the indexer should execute. Possible values include: "standard", "private". Default value: "standard". - :paramtype execution_environment: str or + :vartype execution_environment: str or ~azure.search.documents.indexes.models.IndexerExecutionEnvironment - :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL - database data sources, specified in the format "hh:mm:ss". - :paramtype query_timeout: str + :ivar query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL database + data sources, specified in the format "hh:mm:ss". + :vartype query_timeout: str """ _attribute_map = { @@ -2458,6 +3135,75 @@ def __init__( self, **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. + Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines". + Default value: "default". + :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode + :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore + when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip + over those files during indexing. + :paramtype excluded_file_name_extensions: str + :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select + when processing from Azure blob storage. 
For example, you could focus indexing on specific + application files ".docx, .pptx, .msg" to specifically include those file types. + :paramtype indexed_file_name_extensions: str + :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to + continue indexing when an unsupported content type is encountered, and you don't know all the + content types (file extensions) in advance. + :paramtype fail_on_unsupported_content_type: bool + :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue + indexing if a document fails indexing. + :paramtype fail_on_unprocessable_document: bool + :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this + property to true to still index storage metadata for blob content that is too large to process. + Oversized blobs are treated as errors by default. For limits on blob size, see + https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. + :paramtype index_storage_metadata_only_for_oversized_documents: bool + :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column + headers, useful for mapping source fields to destination fields in an index. + :paramtype delimited_text_headers: str + :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character + delimiter for CSV files where each line starts a new document (for example, "|"). + :paramtype delimited_text_delimiter: str + :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line + of each blob contains headers. + :paramtype first_line_contains_headers: bool + :keyword document_root: For JSON arrays, given a structured or semi-structured document, you + can specify a path to the array using this property. 
+ :paramtype document_root: str + :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the + indexer which data to extract from image content when "imageAction" is set to a value other + than "none". This applies to embedded image content in a .PDF or other application, or image + files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata", + "allMetadata", "contentAndMetadata". Default value: "contentAndMetadata". + :paramtype data_to_extract: str or + ~azure.search.documents.indexes.models.BlobIndexerDataToExtract + :keyword image_action: Determines how to process embedded images and image files in Azure blob + storage. Setting the "imageAction" configuration to any value other than "none" requires that + a skillset also be attached to that indexer. Possible values include: "none", + "generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none". + :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction + :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data + that is an object representing the original file data downloaded from your blob data source. + This allows you to pass the original file data to a custom skill for processing within the + enrichment pipeline, or to the Document Extraction skill. + :paramtype allow_skillset_to_read_file_data: bool + :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files + in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none". + :paramtype pdf_text_rotation_algorithm: str or + ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm + :keyword execution_environment: Specifies the environment in which the indexer should execute. + Possible values include: "standard", "private". Default value: "standard". 
+ :paramtype execution_environment: str or + ~azure.search.documents.indexes.models.IndexerExecutionEnvironment + :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL + database data sources, specified in the format "hh:mm:ss". + :paramtype query_timeout: str + """ super(IndexingParametersConfiguration, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) self.parsing_mode = kwargs.get('parsing_mode', "default") @@ -2483,10 +3229,10 @@ class IndexingSchedule(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword interval: Required. The interval of time between indexer executions. - :paramtype interval: ~datetime.timedelta - :keyword start_time: The time when an indexer should start running. - :paramtype start_time: ~datetime.datetime + :ivar interval: Required. The interval of time between indexer executions. + :vartype interval: ~datetime.timedelta + :ivar start_time: The time when an indexer should start running. + :vartype start_time: ~datetime.datetime """ _validation = { @@ -2502,6 +3248,12 @@ def __init__( self, **kwargs ): + """ + :keyword interval: Required. The interval of time between indexer executions. + :paramtype interval: ~datetime.timedelta + :keyword start_time: The time when an indexer should start running. + :paramtype start_time: ~datetime.datetime + """ super(IndexingSchedule, self).__init__(**kwargs) self.interval = kwargs['interval'] self.start_time = kwargs.get('start_time', None) @@ -2512,14 +3264,14 @@ class InputFieldMappingEntry(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the input. - :paramtype name: str - :keyword source: The source of the input. - :paramtype source: str - :keyword source_context: The source context used for selecting recursive inputs. 
- :paramtype source_context: str - :keyword inputs: The recursive inputs used when creating a complex type. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar name: Required. The name of the input. + :vartype name: str + :ivar source: The source of the input. + :vartype source: str + :ivar source_context: The source context used for selecting recursive inputs. + :vartype source_context: str + :ivar inputs: The recursive inputs used when creating a complex type. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] """ _validation = { @@ -2537,6 +3289,16 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the input. + :paramtype name: str + :keyword source: The source of the input. + :paramtype source: str + :keyword source_context: The source context used for selecting recursive inputs. + :paramtype source_context: str + :keyword inputs: The recursive inputs used when creating a complex type. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + """ super(InputFieldMappingEntry, self).__init__(**kwargs) self.name = kwargs['name'] self.source = kwargs.get('source', None) @@ -2549,18 +3311,18 @@ class KeepTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword keep_words: Required. 
The list of words to keep. - :paramtype keep_words: list[str] - :keyword lower_case_keep_words: A value indicating whether to lower case all words first. - Default is false. - :paramtype lower_case_keep_words: bool + :vartype name: str + :ivar keep_words: Required. The list of words to keep. + :vartype keep_words: list[str] + :ivar lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. + :vartype lower_case_keep_words: bool """ _validation = { @@ -2580,6 +3342,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword keep_words: Required. The list of words to keep. + :paramtype keep_words: list[str] + :keyword lower_case_keep_words: A value indicating whether to lower case all words first. + Default is false. + :paramtype lower_case_keep_words: bool + """ super(KeepTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' # type: str self.keep_words = kwargs['keep_words'] @@ -2591,38 +3364,37 @@ class KeyPhraseExtractionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. 
- :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv". 
- :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage - :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent, - all identified key phrases will be returned. - :paramtype max_key_phrase_count: int - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :ivar max_key_phrase_count: A number indicating how many key phrases to return. If absent, all + identified key phrases will be returned. + :vartype max_key_phrase_count: int + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -2647,6 +3419,37 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. 
The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", + "pt-PT", "pt-BR", "ru", "es", "sv". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage + :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent, + all identified key phrases will be returned. + :paramtype max_key_phrase_count: int + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(KeyPhraseExtractionSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -2659,18 +3462,18 @@ class KeywordMarkerTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword keywords: Required. 
A list of words to mark as keywords. - :paramtype keywords: list[str] - :keyword ignore_case: A value indicating whether to ignore case. If true, all words are - converted to lower case first. Default is false. - :paramtype ignore_case: bool + :vartype name: str + :ivar keywords: Required. A list of words to mark as keywords. + :vartype keywords: list[str] + :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted + to lower case first. Default is false. + :vartype ignore_case: bool """ _validation = { @@ -2690,6 +3493,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword keywords: Required. A list of words to mark as keywords. + :paramtype keywords: list[str] + :keyword ignore_case: A value indicating whether to ignore case. If true, all words are + converted to lower case first. Default is false. + :paramtype ignore_case: bool + """ super(KeywordMarkerTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' # type: str self.keywords = kwargs['keywords'] @@ -2701,15 +3515,15 @@ class KeywordTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword buffer_size: The read buffer size in bytes. Default is 256. 
- :paramtype buffer_size: int + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar buffer_size: The read buffer size in bytes. Default is 256. + :vartype buffer_size: int """ _validation = { @@ -2727,6 +3541,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword buffer_size: The read buffer size in bytes. Default is 256. + :paramtype buffer_size: int + """ super(KeywordTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' # type: str self.buffer_size = kwargs.get('buffer_size', 256) @@ -2737,16 +3559,16 @@ class KeywordTokenizerV2(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. 
Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -2765,6 +3587,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + """ super(KeywordTokenizerV2, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' # type: str self.max_token_length = kwargs.get('max_token_length', 256) @@ -2775,33 +3606,32 @@ class LanguageDetectionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. 
- :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_country_hint: A country code to use as a hint to the language detection model - if it cannot disambiguate the language. - :paramtype default_country_hint: str - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_country_hint: A country code to use as a hint to the language detection model if + it cannot disambiguate the language. 
+ :vartype default_country_hint: str + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -2825,6 +3655,32 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_country_hint: A country code to use as a hint to the language detection model + if it cannot disambiguate the language. + :paramtype default_country_hint: str + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. 
+ :paramtype model_version: str + """ super(LanguageDetectionSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' # type: str self.default_country_hint = kwargs.get('default_country_hint', None) @@ -2836,18 +3692,18 @@ class LengthTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be - less than the value of max. - :paramtype min_length: int - :keyword max_length: The maximum length in characters. Default and maximum is 300. - :paramtype max_length: int + :vartype name: str + :ivar min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max. + :vartype min_length: int + :ivar max_length: The maximum length in characters. Default and maximum is 300. + :vartype max_length: int """ _validation = { @@ -2868,6 +3724,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be + less than the value of max. 
+ :paramtype min_length: int + :keyword max_length: The maximum length in characters. Default and maximum is 300. + :paramtype max_length: int + """ super(LengthTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' # type: str self.min_length = kwargs.get('min_length', 0) @@ -2879,18 +3746,18 @@ class LimitTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword max_token_count: The maximum number of tokens to produce. Default is 1. - :paramtype max_token_count: int - :keyword consume_all_tokens: A value indicating whether all tokens from the input must be - consumed even if maxTokenCount is reached. Default is false. - :paramtype consume_all_tokens: bool + :vartype name: str + :ivar max_token_count: The maximum number of tokens to produce. Default is 1. + :vartype max_token_count: int + :ivar consume_all_tokens: A value indicating whether all tokens from the input must be consumed + even if maxTokenCount is reached. Default is false. + :vartype consume_all_tokens: bool """ _validation = { @@ -2909,6 +3776,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :paramtype name: str + :keyword max_token_count: The maximum number of tokens to produce. Default is 1. + :paramtype max_token_count: int + :keyword consume_all_tokens: A value indicating whether all tokens from the input must be + consumed even if maxTokenCount is reached. Default is false. + :paramtype consume_all_tokens: bool + """ super(LimitTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' # type: str self.max_token_count = kwargs.get('max_token_count', 1) @@ -2938,6 +3816,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListDataSourcesResult, self).__init__(**kwargs) self.data_sources = None @@ -2965,6 +3845,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListIndexersResult, self).__init__(**kwargs) self.indexers = None @@ -2992,6 +3874,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListIndexesResult, self).__init__(**kwargs) self.indexes = None @@ -3019,6 +3903,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListSkillsetsResult, self).__init__(**kwargs) self.skillsets = None @@ -3046,6 +3932,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListSynonymMapsResult, self).__init__(**kwargs) self.synonym_maps = None @@ -3055,18 +3943,18 @@ class LuceneStandardAnalyzer(LexicalAnalyzer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] + :vartype max_token_length: int + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] """ _validation = { @@ -3086,6 +3974,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + :keyword stopwords: A list of stopwords. + :paramtype stopwords: list[str] + """ super(LuceneStandardAnalyzer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' # type: str self.max_token_length = kwargs.get('max_token_length', 255) @@ -3097,16 +3996,16 @@ class LuceneStandardTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. 
- :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -3124,6 +4023,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. + :paramtype max_token_length: int + """ super(LuceneStandardTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' # type: str self.max_token_length = kwargs.get('max_token_length', 255) @@ -3134,16 +4042,16 @@ class LuceneStandardTokenizerV2(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. 
The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -3162,6 +4070,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + """ super(LuceneStandardTokenizerV2, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' # type: str self.max_token_length = kwargs.get('max_token_length', 255) @@ -3172,21 +4089,21 @@ class MagnitudeScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. 
- :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the magnitude scoring function. - :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters + :ivar parameters: Required. Parameter values for the magnitude scoring function. + :vartype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters """ _validation = { @@ -3208,6 +4125,20 @@ def __init__( self, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the magnitude scoring function. 
+ :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters + """ super(MagnitudeScoringFunction, self).__init__(**kwargs) self.type = 'magnitude' # type: str self.parameters = kwargs['parameters'] @@ -3218,13 +4149,13 @@ class MagnitudeScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword boosting_range_start: Required. The field value at which boosting starts. - :paramtype boosting_range_start: float - :keyword boosting_range_end: Required. The field value at which boosting ends. - :paramtype boosting_range_end: float - :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant + :ivar boosting_range_start: Required. The field value at which boosting starts. + :vartype boosting_range_start: float + :ivar boosting_range_end: Required. The field value at which boosting ends. + :vartype boosting_range_end: float + :ivar should_boost_beyond_range_by_constant: A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. - :paramtype should_boost_beyond_range_by_constant: bool + :vartype should_boost_beyond_range_by_constant: bool """ _validation = { @@ -3242,6 +4173,15 @@ def __init__( self, **kwargs ): + """ + :keyword boosting_range_start: Required. The field value at which boosting starts. + :paramtype boosting_range_start: float + :keyword boosting_range_end: Required. The field value at which boosting ends. + :paramtype boosting_range_end: float + :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant + boost for field values beyond the range end value; default is false. 
+ :paramtype should_boost_beyond_range_by_constant: bool + """ super(MagnitudeScoringParameters, self).__init__(**kwargs) self.boosting_range_start = kwargs['boosting_range_start'] self.boosting_range_end = kwargs['boosting_range_end'] @@ -3253,16 +4193,16 @@ class MappingCharFilter(CharFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the char filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all - occurrences of the character "a" will be replaced with character "b"). - :paramtype mappings: list[str] + :vartype name: str + :ivar mappings: Required. A list of mappings of the following format: "a=>b" (all occurrences + of the character "a" will be replaced with character "b"). + :vartype mappings: list[str] """ _validation = { @@ -3281,6 +4221,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all + occurrences of the character "a" will be replaced with character "b"). 
+ :paramtype mappings: list[str] + """ super(MappingCharFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' # type: str self.mappings = kwargs['mappings'] @@ -3291,32 +4240,31 @@ class MergeSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is - an empty space. - :paramtype insert_pre_tag: str - :keyword insert_post_tag: The tag indicates the end of the merged text. 
By default, the tag is - an empty space. - :paramtype insert_post_tag: str + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an + empty space. + :vartype insert_pre_tag: str + :ivar insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an + empty space. + :vartype insert_post_tag: str """ _validation = { @@ -3340,6 +4288,31 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. 
Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is + an empty space. + :paramtype insert_pre_tag: str + :keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is + an empty space. + :paramtype insert_post_tag: str + """ super(MergeSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.MergeSkill' # type: str self.insert_pre_tag = kwargs.get('insert_pre_tag', " ") @@ -3351,29 +4324,29 @@ class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are split. 
Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. - :paramtype max_token_length: int - :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used - as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :paramtype is_search_tokenizer: bool - :keyword language: The language to use. The default is English. Possible values include: - "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + :vartype max_token_length: int + :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as + the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :vartype is_search_tokenizer: bool + :ivar language: The language to use. The default is English. Possible values include: "arabic", + "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". - :paramtype language: str or + :vartype language: str or ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage """ @@ -3395,6 +4368,29 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :paramtype name: str + :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + split. Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those tokens is split + based on the max token length set. Default is 255. + :paramtype max_token_length: int + :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used + as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :paramtype is_search_tokenizer: bool + :keyword language: The language to use. The default is English. Possible values include: + "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", + "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", + "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". + :paramtype language: str or + ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage + """ super(MicrosoftLanguageStemmingTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' # type: str self.max_token_length = kwargs.get('max_token_length', 255) @@ -3407,29 +4403,29 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. 
The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. - :paramtype max_token_length: int - :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used - as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :paramtype is_search_tokenizer: bool - :keyword language: The language to use. The default is English. Possible values include: - "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", - "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", - "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", - "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", - "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", - "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". - :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage + :vartype max_token_length: int + :ivar is_search_tokenizer: A value indicating how the tokenizer is used. 
Set to true if used as + the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :vartype is_search_tokenizer: bool + :ivar language: The language to use. The default is English. Possible values include: "bangla", + "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech", + "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic", + "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi", + "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian", + "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil", + "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :vartype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage """ _validation = { @@ -3450,6 +4446,28 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + split. Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those tokens is split + based on the max token length set. Default is 255. + :paramtype max_token_length: int + :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used + as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :paramtype is_search_tokenizer: bool + :keyword language: The language to use. The default is English. 
Possible values include: + "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", + "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", + "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", + "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage + """ super(MicrosoftLanguageTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' # type: str self.max_token_length = kwargs.get('max_token_length', 255) @@ -3462,18 +4480,18 @@ class NGramTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. - :paramtype max_gram: int + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. 
Default is 2. + :vartype max_gram: int """ _validation = { @@ -3492,6 +4510,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. + :paramtype max_gram: int + """ super(NGramTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' # type: str self.min_gram = kwargs.get('min_gram', 1) @@ -3503,18 +4532,18 @@ class NGramTokenFilterV2(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. 
+ :vartype max_gram: int """ _validation = { @@ -3535,6 +4564,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + """ super(NGramTokenFilterV2, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' # type: str self.min_gram = kwargs.get('min_gram', 1) @@ -3546,20 +4586,20 @@ class NGramTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword token_chars: Character classes to keep in the tokens. - :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar token_chars: Character classes to keep in the tokens. + :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] """ _validation = { @@ -3581,6 +4621,19 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + :keyword token_chars: Character classes to keep in the tokens. + :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + """ super(NGramTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' # type: str self.min_gram = kwargs.get('min_gram', 1) @@ -3593,39 +4646,37 @@ class OcrSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. 
A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", "sr-Latn", "sk". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.OcrSkillLanguage - :keyword should_detect_orientation: A value indicating to turn orientation detection on or not. + :vartype default_language_code: str or ~azure.search.documents.indexes.models.OcrSkillLanguage + :ivar should_detect_orientation: A value indicating to turn orientation detection on or not. Default is false. - :paramtype should_detect_orientation: bool - :keyword line_ending: Defines the sequence of characters to use between the lines of text + :vartype should_detect_orientation: bool + :ivar line_ending: Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". Possible values include: "space", "carriageReturn", "lineFeed", "carriageReturnLineFeed". - :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding + :vartype line_ending: str or ~azure.search.documents.indexes.models.LineEnding """ _validation = { @@ -3650,6 +4701,38 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. 
+ :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", + "sr-Latn", "sk". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.OcrSkillLanguage + :keyword should_detect_orientation: A value indicating to turn orientation detection on or not. + Default is false. + :paramtype should_detect_orientation: bool + :keyword line_ending: Defines the sequence of characters to use between the lines of text + recognized by the OCR skill. The default value is "space". Possible values include: "space", + "carriageReturn", "lineFeed", "carriageReturnLineFeed". + :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding + """ super(OcrSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -3662,10 +4745,10 @@ class OutputFieldMappingEntry(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. 
The name of the output defined by the skill. - :paramtype name: str - :keyword target_name: The target name of the output. It is optional and default to name. - :paramtype target_name: str + :ivar name: Required. The name of the output defined by the skill. + :vartype name: str + :ivar target_name: The target name of the output. It is optional and default to name. + :vartype target_name: str """ _validation = { @@ -3681,6 +4764,12 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the output defined by the skill. + :paramtype name: str + :keyword target_name: The target name of the output. It is optional and default to name. + :paramtype target_name: str + """ super(OutputFieldMappingEntry, self).__init__(**kwargs) self.name = kwargs['name'] self.target_name = kwargs.get('target_name', None) @@ -3691,24 +4780,24 @@ class PathHierarchyTokenizerV2(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword delimiter: The delimiter character to use. Default is "/". - :paramtype delimiter: str - :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/". - :paramtype replacement: str - :keyword max_token_length: The maximum token length. Default and maximum is 300. - :paramtype max_token_length: int - :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order. + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar delimiter: The delimiter character to use. Default is "/". + :vartype delimiter: str + :ivar replacement: A value that, if set, replaces the delimiter character. Default is "/". + :vartype replacement: str + :ivar max_token_length: The maximum token length. Default and maximum is 300. + :vartype max_token_length: int + :ivar reverse_token_order: A value indicating whether to generate tokens in reverse order. Default is false. - :paramtype reverse_token_order: bool - :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. - :paramtype number_of_tokens_to_skip: int + :vartype reverse_token_order: bool + :ivar number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. + :vartype number_of_tokens_to_skip: int """ _validation = { @@ -3731,6 +4820,23 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword delimiter: The delimiter character to use. Default is "/". + :paramtype delimiter: str + :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/". + :paramtype replacement: str + :keyword max_token_length: The maximum token length. Default and maximum is 300. + :paramtype max_token_length: int + :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order. + Default is false. + :paramtype reverse_token_order: bool + :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. 
+ :paramtype number_of_tokens_to_skip: int + """ super(PathHierarchyTokenizerV2, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' # type: str self.delimiter = kwargs.get('delimiter', "/") @@ -3740,52 +4846,29 @@ def __init__( self.number_of_tokens_to_skip = kwargs.get('number_of_tokens_to_skip', 0) -class Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model): - """Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema. - - :keyword document_keys: document keys to be reset. - :paramtype document_keys: list[str] - :keyword datasource_document_ids: datasource document identifiers to be reset. - :paramtype datasource_document_ids: list[str] - """ - - _attribute_map = { - 'document_keys': {'key': 'documentKeys', 'type': '[str]'}, - 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'}, - } - - def __init__( - self, - **kwargs - ): - super(Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs) - self.document_keys = kwargs.get('document_keys', None) - self.datasource_document_ids = kwargs.get('datasource_document_ids', None) - - class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. 
- :paramtype name: str - :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is true. - :paramtype lower_case_terms: bool - :keyword pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more non-word characters. - :paramtype pattern: str - :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", - "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". - :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] + :vartype lower_case_terms: bool + :ivar pattern: A regular expression pattern to match token separators. Default is an expression + that matches one or more non-word characters. + :vartype pattern: str + :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] """ _validation = { @@ -3806,6 +4889,23 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is + true. 
+ :paramtype lower_case_terms: bool + :keyword pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters. + :paramtype pattern: str + :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :keyword stopwords: A list of stopwords. + :paramtype stopwords: list[str] + """ super(PatternAnalyzer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' # type: str self.lower_case_terms = kwargs.get('lower_case_terms', True) @@ -3819,18 +4919,18 @@ class PatternCaptureTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword patterns: Required. A list of patterns to match against each token. - :paramtype patterns: list[str] - :keyword preserve_original: A value indicating whether to return the original token even if one - of the patterns matches. Default is true. - :paramtype preserve_original: bool + :vartype name: str + :ivar patterns: Required. A list of patterns to match against each token. 
+ :vartype patterns: list[str] + :ivar preserve_original: A value indicating whether to return the original token even if one of + the patterns matches. Default is true. + :vartype preserve_original: bool """ _validation = { @@ -3850,6 +4950,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword patterns: Required. A list of patterns to match against each token. + :paramtype patterns: list[str] + :keyword preserve_original: A value indicating whether to return the original token even if one + of the patterns matches. Default is true. + :paramtype preserve_original: bool + """ super(PatternCaptureTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' # type: str self.patterns = kwargs['patterns'] @@ -3861,17 +4972,17 @@ class PatternReplaceCharFilter(CharFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the char filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword pattern: Required. A regular expression pattern. - :paramtype pattern: str - :keyword replacement: Required. The replacement text. - :paramtype replacement: str + :vartype name: str + :ivar pattern: Required. A regular expression pattern. 
+ :vartype pattern: str + :ivar replacement: Required. The replacement text. + :vartype replacement: str """ _validation = { @@ -3892,6 +5003,16 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword pattern: Required. A regular expression pattern. + :paramtype pattern: str + :keyword replacement: Required. The replacement text. + :paramtype replacement: str + """ super(PatternReplaceCharFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' # type: str self.pattern = kwargs['pattern'] @@ -3903,17 +5024,17 @@ class PatternReplaceTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword pattern: Required. A regular expression pattern. - :paramtype pattern: str - :keyword replacement: Required. The replacement text. - :paramtype replacement: str + :vartype name: str + :ivar pattern: Required. A regular expression pattern. + :vartype pattern: str + :ivar replacement: Required. The replacement text. + :vartype replacement: str """ _validation = { @@ -3934,6 +5055,16 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. 
The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword pattern: Required. A regular expression pattern. + :paramtype pattern: str + :keyword replacement: Required. The replacement text. + :paramtype replacement: str + """ super(PatternReplaceTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' # type: str self.pattern = kwargs['pattern'] @@ -3945,23 +5076,23 @@ class PatternTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more non-word characters. - :paramtype pattern: str - :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", - "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". - :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :keyword group: The zero-based ordinal of the matching group in the regular expression pattern - to extract into tokens. Use -1 if you want to use the entire pattern to split the input into + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar pattern: A regular expression pattern to match token separators. Default is an expression + that matches one or more non-word characters. + :vartype pattern: str + :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :ivar group: The zero-based ordinal of the matching group in the regular expression pattern to + extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. - :paramtype group: int + :vartype group: int """ _validation = { @@ -3981,6 +5112,22 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters. + :paramtype pattern: str + :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :keyword group: The zero-based ordinal of the matching group in the regular expression pattern + to extract into tokens. Use -1 if you want to use the entire pattern to split the input into + tokens, irrespective of matching groups. Default is -1. 
+ :paramtype group: int + """ super(PatternTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' # type: str self.pattern = kwargs.get('pattern', "\W+") @@ -3993,20 +5140,20 @@ class PhoneticTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: + :vartype name: str + :ivar encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse". - :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder - :keyword replace_original_tokens: A value indicating whether encoded tokens should replace + :vartype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder + :ivar replace_original_tokens: A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. - :paramtype replace_original_tokens: bool + :vartype replace_original_tokens: bool """ _validation = { @@ -4025,6 +5172,19 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. 
It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: + "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", + "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse". + :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder + :keyword replace_original_tokens: A value indicating whether encoded tokens should replace + original tokens. If false, encoded tokens are added as synonyms. Default is true. + :paramtype replace_original_tokens: bool + """ super(PhoneticTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' # type: str self.encoder = kwargs.get('encoder', None) @@ -4036,48 +5196,47 @@ class PIIDetectionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. 
- :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. 
- :paramtype minimum_precision: float - :keyword masking_mode: A parameter that provides various ways to mask the personal information + :vartype minimum_precision: float + :ivar masking_mode: A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. Possible values include: "none", "replace". - :paramtype masking_mode: str or + :vartype masking_mode: str or ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode - :keyword masking_character: The character used to mask the text if the maskingMode parameter is + :ivar masking_character: The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. - :paramtype masking_character: str - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str - :keyword pii_categories: A list of PII entity categories that should be extracted and masked. - :paramtype pii_categories: list[str] - :keyword domain: If specified, will set the PII domain to include only a subset of the entity + :vartype masking_character: str + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str + :ivar pii_categories: A list of PII entity categories that should be extracted and masked. + :vartype pii_categories: list[str] + :ivar domain: If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. 
- :paramtype domain: str + :vartype domain: str """ _validation = { @@ -4108,6 +5267,47 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + :paramtype default_language_code: str + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + :keyword masking_mode: A parameter that provides various ways to mask the personal information + detected in the input text. Default is 'none'. Possible values include: "none", "replace". 
+ :paramtype masking_mode: str or + ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode + :keyword masking_character: The character used to mask the text if the maskingMode parameter is + set to replace. Default is '*'. + :paramtype masking_character: str + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + :keyword pii_categories: A list of PII entity categories that should be extracted and masked. + :paramtype pii_categories: list[str] + :keyword domain: If specified, will set the PII domain to include only a subset of the entity + categories. Possible values include: 'phi', 'none'. Default is 'none'. + :paramtype domain: str + """ super(PIIDetectionSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.PIIDetectionSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -4122,8 +5322,8 @@ def __init__( class RequestOptions(msrest.serialization.Model): """Parameter group. - :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :paramtype x_ms_client_request_id: str + :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :vartype x_ms_client_request_id: str """ _attribute_map = { @@ -4134,6 +5334,10 @@ def __init__( self, **kwargs ): + """ + :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :paramtype x_ms_client_request_id: str + """ super(RequestOptions, self).__init__(**kwargs) self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None) @@ -4143,10 +5347,10 @@ class ResourceCounter(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword usage: Required. The resource usage amount. 
- :paramtype usage: long - :keyword quota: The resource amount quota. - :paramtype quota: long + :ivar usage: Required. The resource usage amount. + :vartype usage: long + :ivar quota: The resource amount quota. + :vartype quota: long """ _validation = { @@ -4162,6 +5366,12 @@ def __init__( self, **kwargs ): + """ + :keyword usage: Required. The resource usage amount. + :paramtype usage: long + :keyword quota: The resource amount quota. + :paramtype quota: long + """ super(ResourceCounter, self).__init__(**kwargs) self.usage = kwargs['usage'] self.quota = kwargs.get('quota', None) @@ -4172,17 +5382,17 @@ class ScoringProfile(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the scoring profile. - :paramtype name: str - :keyword text_weights: Parameters that boost scoring based on text matches in certain index + :ivar name: Required. The name of the scoring profile. + :vartype name: str + :ivar text_weights: Parameters that boost scoring based on text matches in certain index fields. - :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights - :keyword functions: The collection of functions that influence the scoring of documents. - :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction] - :keyword function_aggregation: A value indicating how the results of individual scoring - functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. - Possible values include: "sum", "average", "minimum", "maximum", "firstMatching". - :paramtype function_aggregation: str or + :vartype text_weights: ~azure.search.documents.indexes.models.TextWeights + :ivar functions: The collection of functions that influence the scoring of documents. 
+ :vartype functions: list[~azure.search.documents.indexes.models.ScoringFunction] + :ivar function_aggregation: A value indicating how the results of individual scoring functions + should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible + values include: "sum", "average", "minimum", "maximum", "firstMatching". + :vartype function_aggregation: str or ~azure.search.documents.indexes.models.ScoringFunctionAggregation """ @@ -4201,6 +5411,20 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the scoring profile. + :paramtype name: str + :keyword text_weights: Parameters that boost scoring based on text matches in certain index + fields. + :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights + :keyword functions: The collection of functions that influence the scoring of documents. + :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction] + :keyword function_aggregation: A value indicating how the results of individual scoring + functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. + Possible values include: "sum", "average", "minimum", "maximum", "firstMatching". + :paramtype function_aggregation: str or + ~azure.search.documents.indexes.models.ScoringFunctionAggregation + """ super(ScoringProfile, self).__init__(**kwargs) self.name = kwargs['name'] self.text_weights = kwargs.get('text_weights', None) @@ -4239,6 +5463,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchError, self).__init__(**kwargs) self.code = None self.message = None @@ -4250,43 +5476,43 @@ class SearchField(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the field, which must be unique within the fields - collection of the index or parent field. - :paramtype name: str - :keyword type: Required. The data type of the field. 
Possible values include: "Edm.String", + :ivar name: Required. The name of the field, which must be unique within the fields collection + of the index or parent field. + :vartype name: str + :ivar type: Required. The data type of the field. Possible values include: "Edm.String", "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", "Edm.GeographyPoint", "Edm.ComplexType". - :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType - :keyword key: A value indicating whether the field uniquely identifies documents in the index. + :vartype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType + :ivar key: A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. - :paramtype key: bool - :keyword retrievable: A value indicating whether the field can be returned in a search result. - You can disable this option if you want to use a field (for example, margin) as a filter, - sorting, or scoring mechanism but do not want the field to be visible to the end user. This - property must be true for key fields, and it must be null for complex fields. This property can - be changed on existing fields. Enabling this property does not cause any increase in index - storage requirements. Default is true for simple fields and null for complex fields. - :paramtype retrievable: bool - :keyword searchable: A value indicating whether the field is full-text searchable. This means - it will undergo analysis such as word-breaking during indexing. 
If you set a searchable field - to a value like "sunny day", internally it will be split into the individual tokens "sunny" and + :vartype key: bool + :ivar retrievable: A value indicating whether the field can be returned in a search result. You + can disable this option if you want to use a field (for example, margin) as a filter, sorting, + or scoring mechanism but do not want the field to be visible to the end user. This property + must be true for key fields, and it must be null for complex fields. This property can be + changed on existing fields. Enabling this property does not cause any increase in index storage + requirements. Default is true for simple fields and null for complex fields. + :vartype retrievable: bool + :ivar searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and "day". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index since Azure Cognitive Search will store an additional tokenized version of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. - :paramtype searchable: bool - :keyword filterable: A value indicating whether to enable the field to be referenced in $filter + :vartype searchable: bool + :ivar filterable: A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. 
Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. - :paramtype filterable: bool - :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby + :vartype filterable: bool + :ivar sortable: A value indicating whether to enable the field to be referenced in $orderby expressions. By default Azure Cognitive Search sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection @@ -4296,15 +5522,15 @@ class SearchField(msrest.serialization.Model): cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. - :paramtype sortable: bool - :keyword facetable: A value indicating whether to enable the field to be referenced in facet + :vartype sortable: bool + :ivar facetable: A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. - :paramtype facetable: bool - :keyword analyzer: The name of the analyzer to use for the field. 
This option can be used only + :vartype facetable: bool + :ivar analyzer: The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", @@ -4324,11 +5550,11 @@ class SearchField(msrest.serialization.Model): "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". - :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword search_analyzer: The name of the analyzer used at search time for the field. This - option can be used only with searchable fields. It must be set together with indexAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. This + :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", @@ -4347,12 +5573,12 @@ class SearchField(msrest.serialization.Model): "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". - :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This - option can be used only with searchable fields. It must be set together with searchAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. Once - the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + :vartype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar index_analyzer: The name of the analyzer used at indexing time for the field. This option + can be used only with searchable fields. It must be set together with searchAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. Once the + analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", @@ -4370,21 +5596,21 @@ class SearchField(msrest.serialization.Model): "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". - :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword normalizer: The name of the normalizer to use for the field. This option can be used - only with fields with filterable, sortable, or facetable enabled. Once the normalizer is - chosen, it cannot be changed for the field. Must be null for complex fields. Possible values - include: "asciifolding", "elision", "lowercase", "standard", "uppercase". - :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This + :vartype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar normalizer: The name of the normalizer to use for the field. This option can be used only + with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it + cannot be changed for the field. Must be null for complex fields. Possible values include: + "asciifolding", "elision", "lowercase", "standard", "uppercase". + :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :ivar synonym_maps: A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. 
Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. - :paramtype synonym_maps: list[str] - :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or + :vartype synonym_maps: list[str] + :ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. - :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] + :vartype fields: list[~azure.search.documents.indexes.models.SearchField] """ _validation = { @@ -4413,6 +5639,143 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the field, which must be unique within the fields + collection of the index or parent field. + :paramtype name: str + :keyword type: Required. The data type of the field. Possible values include: "Edm.String", + "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", + "Edm.GeographyPoint", "Edm.ComplexType". + :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType + :keyword key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :paramtype key: bool + :keyword retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. 
This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :paramtype retrievable: bool + :keyword searchable: A value indicating whether the field is full-text searchable. This means + it will undergo analysis such as word-breaking during indexing. If you set a searchable field + to a value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :paramtype searchable: bool + :keyword filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. + :paramtype filterable: bool + :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. 
By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :paramtype sortable: bool + :keyword facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :paramtype facetable: bool + :keyword analyzer: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", + "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", + "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword search_analyzer: The name of the analyzer used at search time for the field. This + option can be used only with searchable fields. It must be set together with indexAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. This + analyzer can be updated on an existing field. 
Must be null for complex fields. Possible values + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", + "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", + "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", + "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", + "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", + "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", + "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", + "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", + "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", + "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", + "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. 
Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", + "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", + "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", + "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", + "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", + "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", + "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", + "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", + "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", + "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", + "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword normalizer: The name of the normalizer to use for the field. This option can be used + only with fields with filterable, sortable, or facetable enabled. Once the normalizer is + chosen, it cannot be changed for the field. Must be null for complex fields. 
Possible values + include: "asciifolding", "elision", "lowercase", "standard", "uppercase". + :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :paramtype synonym_maps: list[str] + :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] + """ super(SearchField, self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type'] @@ -4435,31 +5798,31 @@ class SearchIndex(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the index. - :paramtype name: str - :keyword fields: Required. The fields of the index. - :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] - :keyword scoring_profiles: The scoring profiles for the index. - :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] - :keyword default_scoring_profile: The name of the scoring profile to use if none is specified - in the query. If this property is not set and no scoring profile is specified in the query, - then default scoring (tf-idf) will be used. - :paramtype default_scoring_profile: str - :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. 
- :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions - :keyword suggesters: The suggesters for the index. - :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester] - :keyword analyzers: The analyzers for the index. - :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] - :keyword tokenizers: The tokenizers for the index. - :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] - :keyword token_filters: The token filters for the index. - :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] - :keyword char_filters: The character filters for the index. - :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter] - :keyword normalizers: The normalizers for the index. - :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :ivar name: Required. The name of the index. + :vartype name: str + :ivar fields: Required. The fields of the index. + :vartype fields: list[~azure.search.documents.indexes.models.SearchField] + :ivar scoring_profiles: The scoring profiles for the index. + :vartype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] + :ivar default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :vartype default_scoring_profile: str + :ivar cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :vartype cors_options: ~azure.search.documents.indexes.models.CorsOptions + :ivar suggesters: The suggesters for the index. + :vartype suggesters: list[~azure.search.documents.indexes.models.Suggester] + :ivar analyzers: The analyzers for the index. 
+ :vartype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] + :ivar tokenizers: The tokenizers for the index. + :vartype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] + :ivar token_filters: The token filters for the index. + :vartype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] + :ivar char_filters: The character filters for the index. + :vartype char_filters: list[~azure.search.documents.indexes.models.CharFilter] + :ivar normalizers: The normalizers for the index. + :vartype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive @@ -4467,14 +5830,14 @@ class SearchIndex(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar similarity: The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. 
- :paramtype similarity: ~azure.search.documents.indexes.models.Similarity - :keyword e_tag: The ETag of the index. - :paramtype e_tag: str + :vartype similarity: ~azure.search.documents.indexes.models.Similarity + :ivar e_tag: The ETag of the index. + :vartype e_tag: str """ _validation = { @@ -4503,6 +5866,48 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the index. + :paramtype name: str + :keyword fields: Required. The fields of the index. + :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] + :keyword scoring_profiles: The scoring profiles for the index. + :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] + :keyword default_scoring_profile: The name of the scoring profile to use if none is specified + in the query. If this property is not set and no scoring profile is specified in the query, + then default scoring (tf-idf) will be used. + :paramtype default_scoring_profile: str + :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions + :keyword suggesters: The suggesters for the index. + :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester] + :keyword analyzers: The analyzers for the index. + :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] + :keyword tokenizers: The tokenizers for the index. + :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] + :keyword token_filters: The token filters for the index. + :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] + :keyword char_filters: The character filters for the index. + :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter] + :keyword normalizers: The normalizers for the index. 
+ :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined at index + creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity + algorithm is used. + :paramtype similarity: ~azure.search.documents.indexes.models.Similarity + :keyword e_tag: The ETag of the index. + :paramtype e_tag: str + """ super(SearchIndex, self).__init__(**kwargs) self.name = kwargs['name'] self.fields = kwargs['fields'] @@ -4525,32 +5930,32 @@ class SearchIndexer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the indexer. - :paramtype name: str - :keyword description: The description of the indexer. - :paramtype description: str - :keyword data_source_name: Required. The name of the datasource from which this indexer reads + :ivar name: Required. The name of the indexer. + :vartype name: str + :ivar description: The description of the indexer. 
+ :vartype description: str + :ivar data_source_name: Required. The name of the datasource from which this indexer reads data. - :paramtype data_source_name: str - :keyword skillset_name: The name of the skillset executing with this indexer. - :paramtype skillset_name: str - :keyword target_index_name: Required. The name of the index to which this indexer writes data. - :paramtype target_index_name: str - :keyword schedule: The schedule for this indexer. - :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule - :keyword parameters: Parameters for indexer execution. - :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters - :keyword field_mappings: Defines mappings between fields in the data source and corresponding + :vartype data_source_name: str + :ivar skillset_name: The name of the skillset executing with this indexer. + :vartype skillset_name: str + :ivar target_index_name: Required. The name of the index to which this indexer writes data. + :vartype target_index_name: str + :ivar schedule: The schedule for this indexer. + :vartype schedule: ~azure.search.documents.indexes.models.IndexingSchedule + :ivar parameters: Parameters for indexer execution. + :vartype parameters: ~azure.search.documents.indexes.models.IndexingParameters + :ivar field_mappings: Defines mappings between fields in the data source and corresponding target fields in the index. - :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :keyword output_field_mappings: Output field mappings are applied after enrichment and - immediately before indexing. - :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false. - :paramtype is_disabled: bool - :keyword e_tag: The ETag of the indexer. 
- :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :vartype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :ivar output_field_mappings: Output field mappings are applied after enrichment and immediately + before indexing. + :vartype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :ivar is_disabled: A value indicating whether the indexer is disabled. Default is false. + :vartype is_disabled: bool + :ivar e_tag: The ETag of the indexer. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your @@ -4559,10 +5964,10 @@ class SearchIndexer(msrest.serialization.Model): rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification - steps without having to rebuild the index every time. - :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification steps + without having to rebuild the index every time. 
+ :vartype cache: ~azure.search.documents.indexes.models.SearchIndexerCache """ _validation = { @@ -4591,6 +5996,46 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the indexer. + :paramtype name: str + :keyword description: The description of the indexer. + :paramtype description: str + :keyword data_source_name: Required. The name of the datasource from which this indexer reads + data. + :paramtype data_source_name: str + :keyword skillset_name: The name of the skillset executing with this indexer. + :paramtype skillset_name: str + :keyword target_index_name: Required. The name of the index to which this indexer writes data. + :paramtype target_index_name: str + :keyword schedule: The schedule for this indexer. + :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule + :keyword parameters: Parameters for indexer execution. + :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters + :keyword field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :keyword output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false. + :paramtype is_disabled: bool + :keyword e_tag: The ETag of the indexer. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your indexer + definition (as well as indexer execution status) when you want full assurance that no one, not + even Microsoft, can decrypt them in Azure Cognitive Search. 
Once you have encrypted your + indexer definition, it will always remain encrypted. Azure Cognitive Search will ignore + attempts to set this property to null. You can change this property as needed if you want to + rotate your encryption key; Your indexer definition (and indexer execution status) will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification + steps without having to rebuild the index every time. + :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache + """ super(SearchIndexer, self).__init__(**kwargs) self.name = kwargs['name'] self.description = kwargs.get('description', None) @@ -4610,11 +6055,11 @@ def __init__( class SearchIndexerCache(msrest.serialization.Model): """SearchIndexerCache. - :keyword storage_connection_string: The connection string to the storage account where the - cache data will be persisted. - :paramtype storage_connection_string: str - :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled. - :paramtype enable_reprocessing: bool + :ivar storage_connection_string: The connection string to the storage account where the cache + data will be persisted. + :vartype storage_connection_string: str + :ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled. + :vartype enable_reprocessing: bool """ _attribute_map = { @@ -4626,6 +6071,13 @@ def __init__( self, **kwargs ): + """ + :keyword storage_connection_string: The connection string to the storage account where the + cache data will be persisted. + :paramtype storage_connection_string: str + :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled. 
+ :paramtype enable_reprocessing: bool + """ super(SearchIndexerCache, self).__init__(**kwargs) self.storage_connection_string = kwargs.get('storage_connection_string', None) self.enable_reprocessing = kwargs.get('enable_reprocessing', None) @@ -4636,12 +6088,12 @@ class SearchIndexerDataContainer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the table or view (for Azure SQL data source) or - collection (for CosmosDB data source) that will be indexed. - :paramtype name: str - :keyword query: A query that is applied to this data container. The syntax and meaning of this + :ivar name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. + :vartype name: str + :ivar query: A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. - :paramtype query: str + :vartype query: str """ _validation = { @@ -4657,6 +6109,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the table or view (for Azure SQL data source) or + collection (for CosmosDB data source) that will be indexed. + :paramtype name: str + :keyword query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. + :paramtype query: str + """ super(SearchIndexerDataContainer, self).__init__(**kwargs) self.name = kwargs['name'] self.query = kwargs.get('query', None) @@ -4670,9 +6130,9 @@ class SearchIndexerDataIdentity(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by server. 
- :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -4691,6 +6151,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerDataIdentity, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -4700,9 +6162,9 @@ class SearchIndexerDataNoneIdentity(SearchIndexerDataIdentity): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by server. - :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -4717,6 +6179,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerDataNoneIdentity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataNoneIdentity' # type: str @@ -4726,31 +6190,31 @@ class SearchIndexerDataSource(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the datasource. - :paramtype name: str - :keyword description: The description of the datasource. - :paramtype description: str - :keyword type: Required. The type of the datasource. Possible values include: "azuresql", + :ivar name: Required. The name of the datasource. + :vartype name: str + :ivar description: The description of the datasource. + :vartype description: str + :ivar type: Required. The type of the datasource. Possible values include: "azuresql", "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2". - :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType - :keyword credentials: Required. Credentials for the datasource. - :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials - :keyword container: Required. The data container for the datasource. 
- :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer - :keyword identity: An explicit managed identity to use for this datasource. If not specified - and the connection string is a managed identity, the system-assigned managed identity is used. - If not specified, the value remains unchanged. If "none" is specified, the value of this - property is cleared. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :keyword data_change_detection_policy: The data change detection policy for the datasource. - :paramtype data_change_detection_policy: + :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType + :ivar credentials: Required. Credentials for the datasource. + :vartype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials + :ivar container: Required. The data container for the datasource. + :vartype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer + :ivar identity: An explicit managed identity to use for this datasource. If not specified and + the connection string is a managed identity, the system-assigned managed identity is used. If + not specified, the value remains unchanged. If "none" is specified, the value of this property + is cleared. + :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :ivar data_change_detection_policy: The data change detection policy for the datasource. + :vartype data_change_detection_policy: ~azure.search.documents.indexes.models.DataChangeDetectionPolicy - :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource. - :paramtype data_deletion_detection_policy: + :ivar data_deletion_detection_policy: The data deletion detection policy for the datasource. + :vartype data_deletion_detection_policy: ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy - :keyword e_tag: The ETag of the data source. 
- :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :ivar e_tag: The ETag of the data source. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition in Azure Cognitive Search. Once you have encrypted your data source @@ -4759,7 +6223,7 @@ class SearchIndexerDataSource(msrest.serialization.Model): encryption key; Your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey """ _validation = { @@ -4786,6 +6250,42 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the datasource. + :paramtype name: str + :keyword description: The description of the datasource. + :paramtype description: str + :keyword type: Required. The type of the datasource. Possible values include: "azuresql", + "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2". + :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType + :keyword credentials: Required. Credentials for the datasource. + :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials + :keyword container: Required. The data container for the datasource. + :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer + :keyword identity: An explicit managed identity to use for this datasource. 
If not specified + and the connection string is a managed identity, the system-assigned managed identity is used. + If not specified, the value remains unchanged. If "none" is specified, the value of this + property is cleared. + :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :keyword data_change_detection_policy: The data change detection policy for the datasource. + :paramtype data_change_detection_policy: + ~azure.search.documents.indexes.models.DataChangeDetectionPolicy + :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource. + :paramtype data_deletion_detection_policy: + ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy + :keyword e_tag: The ETag of the data source. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your datasource + definition when you want full assurance that no one, not even Microsoft, can decrypt your data + source definition in Azure Cognitive Search. Once you have encrypted your data source + definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set + this property to null. You can change this property as needed if you want to rotate your + encryption key; Your datasource definition will be unaffected. Encryption with customer-managed + keys is not available for free search services, and is only available for paid services created + on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + """ super(SearchIndexerDataSource, self).__init__(**kwargs) self.name = kwargs['name'] self.description = kwargs.get('description', None) @@ -4804,14 +6304,14 @@ class SearchIndexerDataUserAssignedIdentity(SearchIndexerDataIdentity): All required parameters must be populated in order to send to Azure. 
- :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by server. - :paramtype odata_type: str - :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user + :vartype odata_type: str + :ivar user_assigned_identity: Required. The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. - :paramtype user_assigned_identity: str + :vartype user_assigned_identity: str """ _validation = { @@ -4828,6 +6328,13 @@ def __init__( self, **kwargs ): + """ + :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user + assigned managed identity typically in the form + "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" + that should have been assigned to the search service. + :paramtype user_assigned_identity: str + """ super(SearchIndexerDataUserAssignedIdentity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataUserAssignedIdentity' # type: str self.user_assigned_identity = kwargs['user_assigned_identity'] @@ -4883,6 +6390,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerError, self).__init__(**kwargs) self.key = None self.error_message = None @@ -4897,11 +6406,11 @@ class SearchIndexerKnowledgeStore(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword storage_connection_string: Required. The connection string to the storage account + :ivar storage_connection_string: Required. The connection string to the storage account projections will be stored in. 
- :paramtype storage_connection_string: str - :keyword projections: Required. A list of additional projections to perform during indexing. - :paramtype projections: + :vartype storage_connection_string: str + :ivar projections: Required. A list of additional projections to perform during indexing. + :vartype projections: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection] """ @@ -4919,6 +6428,14 @@ def __init__( self, **kwargs ): + """ + :keyword storage_connection_string: Required. The connection string to the storage account + projections will be stored in. + :paramtype storage_connection_string: str + :keyword projections: Required. A list of additional projections to perform during indexing. + :paramtype projections: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection] + """ super(SearchIndexerKnowledgeStore, self).__init__(**kwargs) self.storage_connection_string = kwargs['storage_connection_string'] self.projections = kwargs['projections'] @@ -4927,16 +6444,16 @@ def __init__( class SearchIndexerKnowledgeStoreProjectionSelector(msrest.serialization.Model): """Abstract class to share properties between concrete selectors. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. 
+ :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] """ _attribute_map = { @@ -4951,6 +6468,18 @@ def __init__( self, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + """ super(SearchIndexerKnowledgeStoreProjectionSelector, self).__init__(**kwargs) self.reference_key_name = kwargs.get('reference_key_name', None) self.generated_key_name = kwargs.get('generated_key_name', None) @@ -4964,18 +6493,18 @@ class SearchIndexerKnowledgeStoreBlobProjectionSelector(SearchIndexerKnowledgeSt All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword storage_container: Required. Blob container to store projections in. 
- :paramtype storage_container: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar storage_container: Required. Blob container to store projections in. + :vartype storage_container: str """ _validation = { @@ -4995,6 +6524,20 @@ def __init__( self, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword storage_container: Required. Blob container to store projections in. + :paramtype storage_container: str + """ super(SearchIndexerKnowledgeStoreBlobProjectionSelector, self).__init__(**kwargs) self.storage_container = kwargs['storage_container'] @@ -5004,18 +6547,18 @@ class SearchIndexerKnowledgeStoreFileProjectionSelector(SearchIndexerKnowledgeSt All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. 
- :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword storage_container: Required. Blob container to store projections in. - :paramtype storage_container: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar storage_container: Required. Blob container to store projections in. + :vartype storage_container: str """ _validation = { @@ -5035,6 +6578,20 @@ def __init__( self, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword storage_container: Required. Blob container to store projections in. 
+ :paramtype storage_container: str + """ super(SearchIndexerKnowledgeStoreFileProjectionSelector, self).__init__(**kwargs) @@ -5043,18 +6600,18 @@ class SearchIndexerKnowledgeStoreObjectProjectionSelector(SearchIndexerKnowledge All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword storage_container: Required. Blob container to store projections in. - :paramtype storage_container: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar storage_container: Required. Blob container to store projections in. + :vartype storage_container: str """ _validation = { @@ -5074,20 +6631,34 @@ def __init__( self, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. 
+ :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword storage_container: Required. Blob container to store projections in. + :paramtype storage_container: str + """ super(SearchIndexerKnowledgeStoreObjectProjectionSelector, self).__init__(**kwargs) class SearchIndexerKnowledgeStoreProjection(msrest.serialization.Model): """Container object for various projection selectors. - :keyword tables: Projections to Azure Table storage. - :paramtype tables: + :ivar tables: Projections to Azure Table storage. + :vartype tables: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector] - :keyword objects: Projections to Azure Blob storage. - :paramtype objects: + :ivar objects: Projections to Azure Blob storage. + :vartype objects: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector] - :keyword files: Projections to Azure File storage. - :paramtype files: + :ivar files: Projections to Azure File storage. + :vartype files: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector] """ @@ -5101,6 +6672,17 @@ def __init__( self, **kwargs ): + """ + :keyword tables: Projections to Azure Table storage. + :paramtype tables: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector] + :keyword objects: Projections to Azure Blob storage. + :paramtype objects: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector] + :keyword files: Projections to Azure File storage. 
+ :paramtype files: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector] + """ super(SearchIndexerKnowledgeStoreProjection, self).__init__(**kwargs) self.tables = kwargs.get('tables', None) self.objects = kwargs.get('objects', None) @@ -5112,18 +6694,18 @@ class SearchIndexerKnowledgeStoreTableProjectionSelector(SearchIndexerKnowledgeS All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword table_name: Required. Name of the Azure table to store projected data in. - :paramtype table_name: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar table_name: Required. Name of the Azure table to store projected data in. + :vartype table_name: str """ _validation = { @@ -5143,6 +6725,20 @@ def __init__( self, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. 
+ :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword table_name: Required. Name of the Azure table to store projected data in. + :paramtype table_name: str + """ super(SearchIndexerKnowledgeStoreTableProjectionSelector, self).__init__(**kwargs) self.table_name = kwargs['table_name'] @@ -5179,6 +6775,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerLimits, self).__init__(**kwargs) self.max_run_time = None self.max_document_extraction_size = None @@ -5190,22 +6788,22 @@ class SearchIndexerSkillset(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the skillset. - :paramtype name: str - :keyword description: The description of the skillset. - :paramtype description: str - :keyword skills: Required. A list of skills in the skillset. - :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] - :keyword cognitive_services_account: Details about cognitive services to be used when running + :ivar name: Required. The name of the skillset. + :vartype name: str + :ivar description: The description of the skillset. + :vartype description: str + :ivar skills: Required. A list of skills in the skillset. + :vartype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] + :ivar cognitive_services_account: Details about cognitive services to be used when running skills. 
- :paramtype cognitive_services_account: + :vartype cognitive_services_account: ~azure.search.documents.indexes.models.CognitiveServicesAccount - :keyword knowledge_store: Definition of additional projections to azure blob, table, or files, - of enriched data. - :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore - :keyword e_tag: The ETag of the skillset. - :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :ivar knowledge_store: Definition of additional projections to azure blob, table, or files, of + enriched data. + :vartype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore + :ivar e_tag: The ETag of the skillset. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure Cognitive Search. Once you have encrypted your skillset @@ -5214,7 +6812,7 @@ class SearchIndexerSkillset(msrest.serialization.Model): encryption key; Your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey """ _validation = { @@ -5236,6 +6834,33 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the skillset. + :paramtype name: str + :keyword description: The description of the skillset. + :paramtype description: str + :keyword skills: Required. A list of skills in the skillset. 
+ :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] + :keyword cognitive_services_account: Details about cognitive services to be used when running + skills. + :paramtype cognitive_services_account: + ~azure.search.documents.indexes.models.CognitiveServicesAccount + :keyword knowledge_store: Definition of additional projections to azure blob, table, or files, + of enriched data. + :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore + :keyword e_tag: The ETag of the skillset. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your skillset + definition when you want full assurance that no one, not even Microsoft, can decrypt your + skillset definition in Azure Cognitive Search. Once you have encrypted your skillset + definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set + this property to null. You can change this property as needed if you want to rotate your + encryption key; Your skillset definition will be unaffected. Encryption with customer-managed + keys is not available for free search services, and is only available for paid services created + on or after January 1, 2019. 
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + """ super(SearchIndexerSkillset, self).__init__(**kwargs) self.name = kwargs['name'] self.description = kwargs.get('description', None) @@ -5283,6 +6908,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerStatus, self).__init__(**kwargs) self.status = None self.last_result = None @@ -5333,6 +6960,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerWarning, self).__init__(**kwargs) self.key = None self.message = None @@ -5346,25 +6975,25 @@ class SearchResourceEncryptionKey(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your + :ivar key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data + at rest. + :vartype key_name: str + :ivar key_version: Required. The version of your Azure Key Vault key to be used to encrypt your data at rest. - :paramtype key_name: str - :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt - your data at rest. - :paramtype key_version: str - :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, - that contains the key to be used to encrypt your data at rest. An example URI might be + :vartype key_version: str + :ivar vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that + contains the key to be used to encrypt your data at rest. An example URI might be https://my-keyvault-name.vault.azure.net. - :paramtype vault_uri: str - :keyword access_credentials: Optional Azure Active Directory credentials used for accessing - your Azure Key Vault. Not required if using managed identity instead. 
- :paramtype access_credentials: + :vartype vault_uri: str + :ivar access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key Vault. Not required if using managed identity instead. + :vartype access_credentials: ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials - :keyword identity: An explicit managed identity to use for this encryption key. If not - specified and the access credentials property is null, the system-assigned managed identity is - used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. - If "none" is specified, the value of this property is cleared. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :ivar identity: An explicit managed identity to use for this encryption key. If not specified + and the access credentials property is null, the system-assigned managed identity is used. On + update to the resource, if the explicit identity is unspecified, it remains unchanged. If + "none" is specified, the value of this property is cleared. + :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity """ _validation = { @@ -5385,6 +7014,27 @@ def __init__( self, **kwargs ): + """ + :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your + data at rest. + :paramtype key_name: str + :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt + your data at rest. + :paramtype key_version: str + :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, + that contains the key to be used to encrypt your data at rest. An example URI might be + https://my-keyvault-name.vault.azure.net. + :paramtype vault_uri: str + :keyword access_credentials: Optional Azure Active Directory credentials used for accessing + your Azure Key Vault. Not required if using managed identity instead. 
+ :paramtype access_credentials: + ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials + :keyword identity: An explicit managed identity to use for this encryption key. If not + specified and the access credentials property is null, the system-assigned managed identity is + used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. + If "none" is specified, the value of this property is cleared. + :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + """ super(SearchResourceEncryptionKey, self).__init__(**kwargs) self.key_name = kwargs['key_name'] self.key_version = kwargs['key_version'] @@ -5398,30 +7048,29 @@ class SentimentSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. 
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", "ru", "es", "sv", "tr". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.SentimentSkillLanguage """ @@ -5445,6 +7094,30 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. 
+ :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", + "ru", "es", "sv", "tr". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.SentimentSkillLanguage + """ super(SentimentSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -5455,36 +7128,35 @@ class SentimentSkillV3(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. 
A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword include_opinion_mining: If set to true, the skill output will include information from + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar include_opinion_mining: If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. - :paramtype include_opinion_mining: bool - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype include_opinion_mining: bool + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -5509,6 +7181,35 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. 
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + :paramtype default_language_code: str + :keyword include_opinion_mining: If set to true, the skill output will include information from + Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated + assessment (adjective) in the text. Default is false. + :paramtype include_opinion_mining: bool + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(SentimentSkillV3, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.V3.SentimentSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -5521,21 +7222,20 @@ class ServiceCounters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword document_counter: Required. Total number of documents across all indexes in the - service. - :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword index_counter: Required. Total number of indexes. - :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword indexer_counter: Required. Total number of indexers. - :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword data_source_counter: Required. Total number of data sources. 
- :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword storage_size_counter: Required. Total size of used storage in bytes. - :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword synonym_map_counter: Required. Total number of synonym maps. - :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword skillset_counter: Total number of skillsets. - :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar document_counter: Required. Total number of documents across all indexes in the service. + :vartype document_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar index_counter: Required. Total number of indexes. + :vartype index_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar indexer_counter: Required. Total number of indexers. + :vartype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar data_source_counter: Required. Total number of data sources. + :vartype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar storage_size_counter: Required. Total size of used storage in bytes. + :vartype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar synonym_map_counter: Required. Total number of synonym maps. + :vartype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar skillset_counter: Total number of skillsets. + :vartype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter """ _validation = { @@ -5561,6 +7261,23 @@ def __init__( self, **kwargs ): + """ + :keyword document_counter: Required. Total number of documents across all indexes in the + service. + :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword index_counter: Required. Total number of indexes. 
+ :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword indexer_counter: Required. Total number of indexers. + :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword data_source_counter: Required. Total number of data sources. + :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword storage_size_counter: Required. Total size of used storage in bytes. + :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword synonym_map_counter: Required. Total number of synonym maps. + :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword skillset_counter: Total number of skillsets. + :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter + """ super(ServiceCounters, self).__init__(**kwargs) self.document_counter = kwargs['document_counter'] self.index_counter = kwargs['index_counter'] @@ -5574,17 +7291,17 @@ def __init__( class ServiceLimits(msrest.serialization.Model): """Represents various service level limits. - :keyword max_fields_per_index: The maximum allowed fields per index. - :paramtype max_fields_per_index: int - :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in - an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. - :paramtype max_field_nesting_depth_per_index: int - :keyword max_complex_collection_fields_per_index: The maximum number of fields of type + :ivar max_fields_per_index: The maximum allowed fields per index. + :vartype max_fields_per_index: int + :ivar max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an + index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. 
+ :vartype max_field_nesting_depth_per_index: int + :ivar max_complex_collection_fields_per_index: The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. - :paramtype max_complex_collection_fields_per_index: int - :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in - complex collections allowed per document. - :paramtype max_complex_objects_in_collections_per_document: int + :vartype max_complex_collection_fields_per_index: int + :ivar max_complex_objects_in_collections_per_document: The maximum number of objects in complex + collections allowed per document. + :vartype max_complex_objects_in_collections_per_document: int """ _attribute_map = { @@ -5598,6 +7315,19 @@ def __init__( self, **kwargs ): + """ + :keyword max_fields_per_index: The maximum allowed fields per index. + :paramtype max_fields_per_index: int + :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in + an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. + :paramtype max_field_nesting_depth_per_index: int + :keyword max_complex_collection_fields_per_index: The maximum number of fields of type + Collection(Edm.ComplexType) allowed in an index. + :paramtype max_complex_collection_fields_per_index: int + :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in + complex collections allowed per document. + :paramtype max_complex_objects_in_collections_per_document: int + """ super(ServiceLimits, self).__init__(**kwargs) self.max_fields_per_index = kwargs.get('max_fields_per_index', None) self.max_field_nesting_depth_per_index = kwargs.get('max_field_nesting_depth_per_index', None) @@ -5610,10 +7340,10 @@ class ServiceStatistics(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword counters: Required. Service level resource counters. 
- :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters - :keyword limits: Required. Service level general limits. - :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits + :ivar counters: Required. Service level resource counters. + :vartype counters: ~azure.search.documents.indexes.models.ServiceCounters + :ivar limits: Required. Service level general limits. + :vartype limits: ~azure.search.documents.indexes.models.ServiceLimits """ _validation = { @@ -5630,6 +7360,12 @@ def __init__( self, **kwargs ): + """ + :keyword counters: Required. Service level resource counters. + :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters + :keyword limits: Required. Service level general limits. + :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits + """ super(ServiceStatistics, self).__init__(**kwargs) self.counters = kwargs['counters'] self.limits = kwargs['limits'] @@ -5640,26 +7376,25 @@ class ShaperSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). 
The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] """ _validation = { @@ -5681,6 +7416,25 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). 
The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + """ super(ShaperSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' # type: str @@ -5690,31 +7444,31 @@ class ShingleTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2. - :paramtype max_shingle_size: int - :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be - less than the value of maxShingleSize. - :paramtype min_shingle_size: int - :keyword output_unigrams: A value indicating whether the output stream will contain the input + :vartype name: str + :ivar max_shingle_size: The maximum shingle size. Default and minimum value is 2. + :vartype max_shingle_size: int + :ivar min_shingle_size: The minimum shingle size. Default and minimum value is 2. 
Must be less + than the value of maxShingleSize. + :vartype min_shingle_size: int + :ivar output_unigrams: A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. - :paramtype output_unigrams: bool - :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for - those times when no shingles are available. This property takes precedence when outputUnigrams - is set to false. Default is false. - :paramtype output_unigrams_if_no_shingles: bool - :keyword token_separator: The string to use when joining adjacent tokens to form a shingle. + :vartype output_unigrams: bool + :ivar output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those + times when no shingles are available. This property takes precedence when outputUnigrams is set + to false. Default is false. + :vartype output_unigrams_if_no_shingles: bool + :ivar token_separator: The string to use when joining adjacent tokens to form a shingle. Default is a single space (" "). - :paramtype token_separator: str - :keyword filter_token: The string to insert for each position at which there is no token. - Default is an underscore ("_"). - :paramtype filter_token: str + :vartype token_separator: str + :ivar filter_token: The string to insert for each position at which there is no token. Default + is an underscore ("_"). + :vartype filter_token: str """ _validation = { @@ -5739,6 +7493,30 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2. + :paramtype max_shingle_size: int + :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. 
Must be + less than the value of maxShingleSize. + :paramtype min_shingle_size: int + :keyword output_unigrams: A value indicating whether the output stream will contain the input + tokens (unigrams) as well as shingles. Default is true. + :paramtype output_unigrams: bool + :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for + those times when no shingles are available. This property takes precedence when outputUnigrams + is set to false. Default is false. + :paramtype output_unigrams_if_no_shingles: bool + :keyword token_separator: The string to use when joining adjacent tokens to form a shingle. + Default is a single space (" "). + :paramtype token_separator: str + :keyword filter_token: The string to insert for each position at which there is no token. + Default is an underscore ("_"). + :paramtype filter_token: str + """ super(ShingleTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' # type: str self.max_shingle_size = kwargs.get('max_shingle_size', 2) @@ -5749,23 +7527,46 @@ def __init__( self.filter_token = kwargs.get('filter_token', "_") +class SkillNames(msrest.serialization.Model): + """SkillNames. + + :ivar skill_names: the names of skills to be reset. + :vartype skill_names: list[str] + """ + + _attribute_map = { + 'skill_names': {'key': 'skillNames', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword skill_names: the names of skills to be reset. + :paramtype skill_names: list[str] + """ + super(SkillNames, self).__init__(**kwargs) + self.skill_names = kwargs.get('skill_names', None) + + class SnowballTokenFilter(TokenFilter): """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. 
- :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword language: Required. The language to use. Possible values include: "armenian", - "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", - "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", - "russian", "spanish", "swedish", "turkish". - :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage + :vartype name: str + :ivar language: Required. The language to use. Possible values include: "armenian", "basque", + "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", + "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", + "spanish", "swedish", "turkish". + :vartype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage """ _validation = { @@ -5784,6 +7585,17 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword language: Required. The language to use. Possible values include: "armenian", + "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", + "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", + "russian", "spanish", "swedish", "turkish". 
+ :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage + """ super(SnowballTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' # type: str self.language = kwargs['language'] @@ -5794,13 +7606,13 @@ class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data deletion detection + :ivar odata_type: Required. Identifies the concrete type of the data deletion detection policy.Constant filled by server. - :paramtype odata_type: str - :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection. - :paramtype soft_delete_column_name: str - :keyword soft_delete_marker_value: The marker value that identifies an item as deleted. - :paramtype soft_delete_marker_value: str + :vartype odata_type: str + :ivar soft_delete_column_name: The name of the column to use for soft-deletion detection. + :vartype soft_delete_column_name: str + :ivar soft_delete_marker_value: The marker value that identifies an item as deleted. + :vartype soft_delete_marker_value: str """ _validation = { @@ -5817,6 +7629,12 @@ def __init__( self, **kwargs ): + """ + :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection. + :paramtype soft_delete_column_name: str + :keyword soft_delete_marker_value: The marker value that identifies an item as deleted. + :paramtype soft_delete_marker_value: str + """ super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' # type: str self.soft_delete_column_name = kwargs.get('soft_delete_column_name', None) @@ -5828,35 +7646,34 @@ class SplitSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. 
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. 
Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.SplitSkillLanguage - :keyword text_split_mode: A value indicating which split mode to perform. Possible values - include: "pages", "sentences". - :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode - :keyword maximum_page_length: The desired maximum page length. Default is 10000. - :paramtype maximum_page_length: int + :ivar text_split_mode: A value indicating which split mode to perform. Possible values include: + "pages", "sentences". + :vartype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode + :ivar maximum_page_length: The desired maximum page length. Default is 10000. + :vartype maximum_page_length: int """ _validation = { @@ -5881,6 +7698,34 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. 
+ :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.SplitSkillLanguage + :keyword text_split_mode: A value indicating which split mode to perform. Possible values + include: "pages", "sentences". + :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode + :keyword maximum_page_length: The desired maximum page length. Default is 10000. + :paramtype maximum_page_length: int + """ super(SplitSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.SplitSkill' # type: str self.default_language_code = kwargs.get('default_language_code', None) @@ -5893,9 +7738,9 @@ class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data change detection + :ivar odata_type: Required. Identifies the concrete type of the data change detection policy.Constant filled by server. 
- :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -5910,6 +7755,8 @@ def __init__( self, **kwargs ): + """ + """ super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' # type: str @@ -5919,16 +7766,16 @@ class StemmerOverrideTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for + :vartype name: str + :ivar rules: Required. A list of stemming rules in the following format: "word => stem", for example: "ran => run". - :paramtype rules: list[str] + :vartype rules: list[str] """ _validation = { @@ -5947,6 +7794,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for + example: "ran => run". 
+ :paramtype rules: list[str] + """ super(StemmerOverrideTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' # type: str self.rules = kwargs['rules'] @@ -5957,23 +7813,23 @@ class StemmerTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword language: Required. The language to use. Possible values include: "arabic", - "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", - "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", - "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + :vartype name: str + :ivar language: Required. The language to use. 
Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", + "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", + "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". - :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage + :vartype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage """ _validation = { @@ -5992,6 +7848,22 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword language: Required. The language to use. 
Possible values include: "arabic", + "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", + "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", + "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", + "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", + "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", + "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", + "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". + :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage + """ super(StemmerTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' # type: str self.language = kwargs['language'] @@ -6002,15 +7874,15 @@ class StopAnalyzer(LexicalAnalyzer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. 
+ :vartype name: str + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] """ _validation = { @@ -6028,6 +7900,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword stopwords: A list of stopwords. + :paramtype stopwords: list[str] + """ super(StopAnalyzer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' # type: str self.stopwords = kwargs.get('stopwords', None) @@ -6038,29 +7918,29 @@ class StopwordsTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot + :vartype name: str + :ivar stopwords: The list of stopwords. This property and the stopwords list property cannot both be set. - :paramtype stopwords: list[str] - :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords + :vartype stopwords: list[str] + :ivar stopwords_list: A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. 
Possible values include: "arabic", "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", "sorani", "spanish", "swedish", "thai", "turkish". - :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList - :keyword ignore_case: A value indicating whether to ignore case. If true, all words are - converted to lower case first. Default is false. - :paramtype ignore_case: bool - :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term - if it's a stop word. Default is true. - :paramtype remove_trailing_stop_words: bool + :vartype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList + :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted + to lower case first. Default is false. + :vartype ignore_case: bool + :ivar remove_trailing_stop_words: A value indicating whether to ignore the last search term if + it's a stop word. Default is true. + :vartype remove_trailing_stop_words: bool """ _validation = { @@ -6081,6 +7961,28 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot + both be set. + :paramtype stopwords: list[str] + :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords + property cannot both be set. Default is English. 
Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", + "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", + "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", + "sorani", "spanish", "swedish", "thai", "turkish". + :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList + :keyword ignore_case: A value indicating whether to ignore case. If true, all words are + converted to lower case first. Default is false. + :paramtype ignore_case: bool + :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term + if it's a stop word. Default is true. + :paramtype remove_trailing_stop_words: bool + """ super(StopwordsTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' # type: str self.stopwords = kwargs.get('stopwords', None) @@ -6096,14 +7998,14 @@ class Suggester(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the suggester. - :paramtype name: str + :ivar name: Required. The name of the suggester. + :vartype name: str :ivar search_mode: A value indicating the capabilities of the suggester. Has constant value: "analyzingInfixMatching". :vartype search_mode: str - :keyword source_fields: Required. The list of field names to which the suggester applies. Each + :ivar source_fields: Required. The list of field names to which the suggester applies. Each field must be searchable. - :paramtype source_fields: list[str] + :vartype source_fields: list[str] """ _validation = { @@ -6124,6 +8026,13 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the suggester. + :paramtype name: str + :keyword source_fields: Required. The list of field names to which the suggester applies. 
Each + field must be searchable. + :paramtype source_fields: list[str] + """ super(Suggester, self).__init__(**kwargs) self.name = kwargs['name'] self.source_fields = kwargs['source_fields'] @@ -6136,15 +8045,15 @@ class SynonymMap(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the synonym map. - :paramtype name: str + :ivar name: Required. The name of the synonym map. + :vartype name: str :ivar format: The format of the synonym map. Only the 'solr' format is currently supported. Has constant value: "solr". :vartype format: str - :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The + :ivar synonyms: Required. A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. - :paramtype synonyms: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :vartype synonyms: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive @@ -6152,9 +8061,9 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword e_tag: The ETag of the synonym map. - :paramtype e_tag: str + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar e_tag: The ETag of the synonym map. 
+ :vartype e_tag: str """ _validation = { @@ -6177,6 +8086,24 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the synonym map. + :paramtype name: str + :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The + rules must be separated by newlines. + :paramtype synonyms: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword e_tag: The ETag of the synonym map. + :paramtype e_tag: str + """ super(SynonymMap, self).__init__(**kwargs) self.name = kwargs['name'] self.synonyms = kwargs['synonyms'] @@ -6189,30 +8116,30 @@ class SynonymTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. 
It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible, + :vartype name: str + :ivar synonyms: Required. A list of synonyms in following one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. - :paramtype synonyms: list[str] - :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is + :vartype synonyms: list[str] + :ivar ignore_case: A value indicating whether to case-fold input for matching. Default is false. - :paramtype ignore_case: bool - :keyword expand: A value indicating whether all words in the list of synonyms (if => notation - is not used) will map to one another. If true, all words in the list of synonyms (if => - notation is not used) will map to one another. The following list: incredible, unbelievable, - fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, + :vartype ignore_case: bool + :ivar expand: A value indicating whether all words in the list of synonyms (if => notation is + not used) will map to one another. If true, all words in the list of synonyms (if => notation + is not used) will map to one another. The following list: incredible, unbelievable, fabulous, + amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. 
- :paramtype expand: bool + :vartype expand: bool """ _validation = { @@ -6233,6 +8160,29 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible, + unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced + with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma + separated list of equivalent words. Set the expand option to change how this list is + interpreted. + :paramtype synonyms: list[str] + :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is + false. + :paramtype ignore_case: bool + :keyword expand: A value indicating whether all words in the list of synonyms (if => notation + is not used) will map to one another. If true, all words in the list of synonyms (if => + notation is not used) will map to one another. The following list: incredible, unbelievable, + fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, + unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, + fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => + incredible. Default is true. + :paramtype expand: bool + """ super(SynonymTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' # type: str self.synonyms = kwargs['synonyms'] @@ -6245,21 +8195,21 @@ class TagScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. 
Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the tag scoring function. - :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters + :ivar parameters: Required. Parameter values for the tag scoring function. + :vartype parameters: ~azure.search.documents.indexes.models.TagScoringParameters """ _validation = { @@ -6281,6 +8231,20 @@ def __init__( self, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". 
Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the tag scoring function. + :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters + """ super(TagScoringFunction, self).__init__(**kwargs) self.type = 'tag' # type: str self.parameters = kwargs['parameters'] @@ -6291,9 +8255,9 @@ class TagScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword tags_parameter: Required. The name of the parameter passed in search queries to - specify the list of tags to compare against the target field. - :paramtype tags_parameter: str + :ivar tags_parameter: Required. The name of the parameter passed in search queries to specify + the list of tags to compare against the target field. + :vartype tags_parameter: str """ _validation = { @@ -6308,6 +8272,11 @@ def __init__( self, **kwargs ): + """ + :keyword tags_parameter: Required. The name of the parameter passed in search queries to + specify the list of tags to compare against the target field. + :paramtype tags_parameter: str + """ super(TagScoringParameters, self).__init__(**kwargs) self.tags_parameter = kwargs['tags_parameter'] @@ -6317,45 +8286,44 @@ class TextTranslationSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. 
A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_to_language_code: Required. The language code to translate documents into for + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_to_language_code: Required. 
The language code to translate documents into for documents that don't specify the to language explicitly. Possible values include: "af", "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". - :paramtype default_to_language_code: str or + :vartype default_to_language_code: str or ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :keyword default_from_language_code: The language code to translate documents from for - documents that don't specify the from language explicitly. Possible values include: "af", "ar", - "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", - "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", - "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", - "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", - "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". - :paramtype default_from_language_code: str or + :ivar default_from_language_code: The language code to translate documents from for documents + that don't specify the from language explicitly. 
Possible values include: "af", "ar", "bn", + "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", + "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", + "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", + "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", + "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". + :vartype default_from_language_code: str or ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :keyword suggested_from: The language code to translate documents from when neither the + :ivar suggested_from: The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is en. Possible values include: "af", "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", @@ -6364,7 +8332,7 @@ class TextTranslationSkill(SearchIndexerSkill): "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". - :paramtype suggested_from: str or + :vartype suggested_from: str or ~azure.search.documents.indexes.models.TextTranslationSkillLanguage """ @@ -6391,6 +8359,54 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. 
+ :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_to_language_code: Required. The language code to translate documents into for + documents that don't specify the to language explicitly. Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", + "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", + "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". + :paramtype default_to_language_code: str or + ~azure.search.documents.indexes.models.TextTranslationSkillLanguage + :keyword default_from_language_code: The language code to translate documents from for + documents that don't specify the from language explicitly. 
Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", + "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", + "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". + :paramtype default_from_language_code: str or + ~azure.search.documents.indexes.models.TextTranslationSkillLanguage + :keyword suggested_from: The language code to translate documents from when neither the + fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the + automatic language detection is unsuccessful. Default is en. Possible values include: "af", + "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", + "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", + "sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", + "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", + "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", + "pa". + :paramtype suggested_from: str or + ~azure.search.documents.indexes.models.TextTranslationSkillLanguage + """ super(TextTranslationSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' # type: str self.default_to_language_code = kwargs['default_to_language_code'] @@ -6403,9 +8419,9 @@ class TextWeights(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword weights: Required. The dictionary of per-field weights to boost document scoring. The + :ivar weights: Required. 
The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. - :paramtype weights: dict[str, float] + :vartype weights: dict[str, float] """ _validation = { @@ -6420,6 +8436,11 @@ def __init__( self, **kwargs ): + """ + :keyword weights: Required. The dictionary of per-field weights to boost document scoring. The + keys are field names and the values are the weights for each field. + :paramtype weights: dict[str, float] + """ super(TextWeights, self).__init__(**kwargs) self.weights = kwargs['weights'] @@ -6429,15 +8450,15 @@ class TruncateTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword length: The length at which terms will be truncated. Default and maximum is 300. - :paramtype length: int + :vartype name: str + :ivar length: The length at which terms will be truncated. Default and maximum is 300. + :vartype length: int """ _validation = { @@ -6456,6 +8477,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword length: The length at which terms will be truncated. Default and maximum is 300. 
+ :paramtype length: int + """ super(TruncateTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter' # type: str self.length = kwargs.get('length', 300) @@ -6466,16 +8495,16 @@ class UaxUrlEmailTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -6494,6 +8523,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. 
+ :paramtype max_token_length: int + """ super(UaxUrlEmailTokenizer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' # type: str self.max_token_length = kwargs.get('max_token_length', 255) @@ -6504,16 +8542,16 @@ class UniqueTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword only_on_same_position: A value indicating whether to remove duplicates only at the - same position. Default is false. - :paramtype only_on_same_position: bool + :vartype name: str + :ivar only_on_same_position: A value indicating whether to remove duplicates only at the same + position. Default is false. + :vartype only_on_same_position: bool """ _validation = { @@ -6531,6 +8569,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword only_on_same_position: A value indicating whether to remove duplicates only at the + same position. Default is false. 
+ :paramtype only_on_same_position: bool + """ super(UniqueTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter' # type: str self.only_on_same_position = kwargs.get('only_on_same_position', False) @@ -6541,39 +8588,38 @@ class WebApiSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword uri: Required. The url for the Web API. - :paramtype uri: str - :keyword http_headers: The headers required to make the http request. 
- :paramtype http_headers: dict[str, str] - :keyword http_method: The method for the http request. - :paramtype http_method: str - :keyword timeout: The desired timeout for the request. Default is 30 seconds. - :paramtype timeout: ~datetime.timedelta - :keyword batch_size: The desired batch size which indicates number of documents. - :paramtype batch_size: int - :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the - Web API. - :paramtype degree_of_parallelism: int + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar uri: Required. The url for the Web API. + :vartype uri: str + :ivar http_headers: The headers required to make the http request. + :vartype http_headers: dict[str, str] + :ivar http_method: The method for the http request. + :vartype http_method: str + :ivar timeout: The desired timeout for the request. Default is 30 seconds. + :vartype timeout: ~datetime.timedelta + :ivar batch_size: The desired batch size which indicates number of documents. + :vartype batch_size: int + :ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web + API. 
+ :vartype degree_of_parallelism: int """ _validation = { @@ -6602,6 +8648,38 @@ def __init__( self, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword uri: Required. The url for the Web API. + :paramtype uri: str + :keyword http_headers: The headers required to make the http request. + :paramtype http_headers: dict[str, str] + :keyword http_method: The method for the http request. + :paramtype http_method: str + :keyword timeout: The desired timeout for the request. Default is 30 seconds. + :paramtype timeout: ~datetime.timedelta + :keyword batch_size: The desired batch size which indicates number of documents. + :paramtype batch_size: int + :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the + Web API. 
+ :paramtype degree_of_parallelism: int + """ super(WebApiSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' # type: str self.uri = kwargs['uri'] @@ -6617,44 +8695,43 @@ class WordDelimiterTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes + :vartype name: str + :ivar generate_word_parts: A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. - :paramtype generate_word_parts: bool - :keyword generate_number_parts: A value indicating whether to generate number subwords. Default - is true. - :paramtype generate_number_parts: bool - :keyword catenate_words: A value indicating whether maximum runs of word parts will be - catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default - is false. - :paramtype catenate_words: bool - :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be + :vartype generate_word_parts: bool + :ivar generate_number_parts: A value indicating whether to generate number subwords. Default is + true. 
+ :vartype generate_number_parts: bool + :ivar catenate_words: A value indicating whether maximum runs of word parts will be catenated. + For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. + :vartype catenate_words: bool + :ivar catenate_numbers: A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. - :paramtype catenate_numbers: bool - :keyword catenate_all: A value indicating whether all subword parts will be catenated. For + :vartype catenate_numbers: bool + :ivar catenate_all: A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. - :paramtype catenate_all: bool - :keyword split_on_case_change: A value indicating whether to split words on caseChange. For + :vartype catenate_all: bool + :ivar split_on_case_change: A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. - :paramtype split_on_case_change: bool - :keyword preserve_original: A value indicating whether original words will be preserved and - added to the subword list. Default is false. - :paramtype preserve_original: bool - :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if - this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. - :paramtype split_on_numerics: bool - :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each + :vartype split_on_case_change: bool + :ivar preserve_original: A value indicating whether original words will be preserved and added + to the subword list. Default is false. + :vartype preserve_original: bool + :ivar split_on_numerics: A value indicating whether to split on numbers. 
For example, if this + is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. + :vartype split_on_numerics: bool + :ivar stem_english_possessive: A value indicating whether to remove trailing "'s" for each subword. Default is true. - :paramtype stem_english_possessive: bool - :keyword protected_words: A list of tokens to protect from being delimited. - :paramtype protected_words: list[str] + :vartype stem_english_possessive: bool + :ivar protected_words: A list of tokens to protect from being delimited. + :vartype protected_words: list[str] """ _validation = { @@ -6681,6 +8758,43 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes + parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is + true. + :paramtype generate_word_parts: bool + :keyword generate_number_parts: A value indicating whether to generate number subwords. Default + is true. + :paramtype generate_number_parts: bool + :keyword catenate_words: A value indicating whether maximum runs of word parts will be + catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default + is false. + :paramtype catenate_words: bool + :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be + catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. + :paramtype catenate_numbers: bool + :keyword catenate_all: A value indicating whether all subword parts will be catenated. For + example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. 
+ :paramtype catenate_all: bool + :keyword split_on_case_change: A value indicating whether to split words on caseChange. For + example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. + :paramtype split_on_case_change: bool + :keyword preserve_original: A value indicating whether original words will be preserved and + added to the subword list. Default is false. + :paramtype preserve_original: bool + :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if + this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. + :paramtype split_on_numerics: bool + :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each + subword. Default is true. + :paramtype stem_english_possessive: bool + :keyword protected_words: A list of tokens to protect from being delimited. + :paramtype protected_words: list[str] + """ super(WordDelimiterTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' # type: str self.generate_word_parts = kwargs.get('generate_word_parts', True) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py index 21b03c1ff677..4a8562656ef1 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py @@ -53,6 +53,8 @@ def __init__( self, **kwargs ): + """ + """ super(AnalyzedTokenInfo, self).__init__(**kwargs) self.token = None self.start_offset = None @@ -65,9 +67,9 @@ class AnalyzeRequest(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword text: Required. The text to break into tokens. 
- :paramtype text: str - :keyword analyzer: The name of the analyzer to use to break the given text. Possible values + :ivar text: Required. The text to break into tokens. + :vartype text: str + :ivar analyzer: The name of the analyzer to use to break the given text. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", @@ -85,20 +87,19 @@ class AnalyzeRequest(msrest.serialization.Model): "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". - :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values + :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar tokenizer: The name of the tokenizer to use to break the given text. Possible values include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". - :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible + :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :ivar normalizer: The name of the normalizer to use to normalize the given text. Possible values include: "asciifolding", "elision", "lowercase", "standard", "uppercase". 
- :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :keyword token_filters: An optional list of token filters to use when breaking the given text. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: An optional list of character filters to use when breaking the given - text. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :ivar token_filters: An optional list of token filters to use when breaking the given text. + :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :ivar char_filters: An optional list of character filters to use when breaking the given text. + :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -125,6 +126,42 @@ def __init__( char_filters: Optional[List[Union[str, "CharFilterName"]]] = None, **kwargs ): + """ + :keyword text: Required. The text to break into tokens. + :paramtype text: str + :keyword analyzer: The name of the analyzer to use to break the given text. 
Possible values + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", + "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", + "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", + "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", + "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", + "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", + "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", + "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", + "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", + "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", + "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values + include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", + "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", + "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". 
+ :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible + values include: "asciifolding", "elision", "lowercase", "standard", "uppercase". + :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :keyword token_filters: An optional list of token filters to use when breaking the given text. + :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :keyword char_filters: An optional list of character filters to use when breaking the given + text. + :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + """ super(AnalyzeRequest, self).__init__(**kwargs) self.text = text self.analyzer = analyzer @@ -139,9 +176,8 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword tokens: Required. The list of tokens returned by the analyzer specified in the - request. - :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] + :ivar tokens: Required. The list of tokens returned by the analyzer specified in the request. + :vartype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] """ _validation = { @@ -158,6 +194,11 @@ def __init__( tokens: List["AnalyzedTokenInfo"], **kwargs ): + """ + :keyword tokens: Required. The list of tokens returned by the analyzer specified in the + request. + :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] + """ super(AnalyzeResult, self).__init__(**kwargs) self.tokens = tokens @@ -170,13 +211,13 @@ class TokenFilter(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. 
- :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str + :vartype name: str """ _validation = { @@ -199,6 +240,12 @@ def __init__( name: str, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + """ super(TokenFilter, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = name @@ -209,16 +256,16 @@ class AsciiFoldingTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword preserve_original: A value indicating whether the original token will be kept. Default - is false. - :paramtype preserve_original: bool + :vartype name: str + :ivar preserve_original: A value indicating whether the original token will be kept. Default is + false. 
+ :vartype preserve_original: bool """ _validation = { @@ -239,6 +286,15 @@ def __init__( preserve_original: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword preserve_original: A value indicating whether the original token will be kept. Default + is false. + :paramtype preserve_original: bool + """ super(AsciiFoldingTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' # type: str self.preserve_original = preserve_original @@ -249,12 +305,12 @@ class AzureActiveDirectoryApplicationCredentials(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword application_id: Required. An AAD Application ID that was granted the required access + :ivar application_id: Required. An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. - :paramtype application_id: str - :keyword application_secret: The authentication key of the specified AAD application. - :paramtype application_secret: str + :vartype application_id: str + :ivar application_secret: The authentication key of the specified AAD application. + :vartype application_secret: str """ _validation = { @@ -273,6 +329,14 @@ def __init__( application_secret: Optional[str] = None, **kwargs ): + """ + :keyword application_id: Required. An AAD Application ID that was granted the required access + permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The + Application ID should not be confused with the Object ID for your AAD Application. 
+ :paramtype application_id: str + :keyword application_secret: The authentication key of the specified AAD application. + :paramtype application_secret: str + """ super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs) self.application_id = application_id self.application_secret = application_secret @@ -286,8 +350,8 @@ class Similarity(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Constant filled by server. - :paramtype odata_type: str + :ivar odata_type: Required. Constant filled by server. + :vartype odata_type: str """ _validation = { @@ -306,6 +370,8 @@ def __init__( self, **kwargs ): + """ + """ super(Similarity, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -315,16 +381,16 @@ class BM25Similarity(Similarity): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Constant filled by server. - :paramtype odata_type: str - :keyword k1: This property controls the scaling function between the term frequency of each + :ivar odata_type: Required. Constant filled by server. + :vartype odata_type: str + :ivar k1: This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. - :paramtype k1: float - :keyword b: This property controls how the length of a document affects the relevance score. By + :vartype k1: float + :ivar b: This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. 
- :paramtype b: float + :vartype b: float """ _validation = { @@ -344,6 +410,16 @@ def __init__( b: Optional[float] = None, **kwargs ): + """ + :keyword k1: This property controls the scaling function between the term frequency of each + matching terms and the final relevance score of a document-query pair. By default, a value of + 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. + :paramtype k1: float + :keyword b: This property controls how the length of a document affects the relevance score. By + default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, + while a value of 1.0 means the score is fully normalized by the length of the document. + :paramtype b: float + """ super(BM25Similarity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' # type: str self.k1 = k1 @@ -358,13 +434,13 @@ class CharFilter(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the char filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str + :vartype name: str """ _validation = { @@ -387,6 +463,12 @@ def __init__( name: str, **kwargs ): + """ + :keyword name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :paramtype name: str + """ super(CharFilter, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = name @@ -397,19 +479,19 @@ class CjkBigramTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword ignore_scripts: The scripts to ignore. - :paramtype ignore_scripts: list[str or + :vartype name: str + :ivar ignore_scripts: The scripts to ignore. + :vartype ignore_scripts: list[str or ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] - :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if + :ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. - :paramtype output_unigrams: bool + :vartype output_unigrams: bool """ _validation = { @@ -432,6 +514,18 @@ def __init__( output_unigrams: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword ignore_scripts: The scripts to ignore. 
+ :paramtype ignore_scripts: list[str or + ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] + :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if + true), or just bigrams (if false). Default is false. + :paramtype output_unigrams: bool + """ super(CjkBigramTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' # type: str self.ignore_scripts = ignore_scripts @@ -443,8 +537,8 @@ class ClassicSimilarity(Similarity): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Constant filled by server. - :paramtype odata_type: str + :ivar odata_type: Required. Constant filled by server. + :vartype odata_type: str """ _validation = { @@ -459,6 +553,8 @@ def __init__( self, **kwargs ): + """ + """ super(ClassicSimilarity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' # type: str @@ -471,13 +567,13 @@ class LexicalTokenizer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str """ _validation = { @@ -500,6 +596,12 @@ def __init__( name: str, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. 
It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + """ super(LexicalTokenizer, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = name @@ -510,16 +612,16 @@ class ClassicTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -541,6 +643,15 @@ def __init__( max_token_length: Optional[int] = 255, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. 
The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + """ super(ClassicTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' # type: str self.max_token_length = max_token_length @@ -554,11 +665,11 @@ class CognitiveServicesAccount(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource + :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource attached to a skillset.Constant filled by server. - :paramtype odata_type: str - :keyword description: Description of the cognitive service resource attached to a skillset. - :paramtype description: str + :vartype odata_type: str + :ivar description: Description of the cognitive service resource attached to a skillset. + :vartype description: str """ _validation = { @@ -580,6 +691,10 @@ def __init__( description: Optional[str] = None, **kwargs ): + """ + :keyword description: Description of the cognitive service resource attached to a skillset. + :paramtype description: str + """ super(CognitiveServicesAccount, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.description = description @@ -590,14 +705,14 @@ class CognitiveServicesAccountKey(CognitiveServicesAccount): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource + :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource attached to a skillset.Constant filled by server. - :paramtype odata_type: str - :keyword description: Description of the cognitive service resource attached to a skillset. - :paramtype description: str - :keyword key: Required. 
The key used to provision the cognitive service resource attached to a + :vartype odata_type: str + :ivar description: Description of the cognitive service resource attached to a skillset. + :vartype description: str + :ivar key: Required. The key used to provision the cognitive service resource attached to a skillset. - :paramtype key: str + :vartype key: str """ _validation = { @@ -618,6 +733,13 @@ def __init__( description: Optional[str] = None, **kwargs ): + """ + :keyword description: Description of the cognitive service resource attached to a skillset. + :paramtype description: str + :keyword key: Required. The key used to provision the cognitive service resource attached to a + skillset. + :paramtype key: str + """ super(CognitiveServicesAccountKey, self).__init__(description=description, **kwargs) self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' # type: str self.key = key @@ -628,22 +750,22 @@ class CommonGramTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword common_words: Required. The set of common words. - :paramtype common_words: list[str] - :keyword ignore_case: A value indicating whether common words matching will be case - insensitive. Default is false. - :paramtype ignore_case: bool - :keyword use_query_mode: A value that indicates whether the token filter is in query mode. 
When - in query mode, the token filter generates bigrams and then removes common words and single - terms followed by a common word. Default is false. - :paramtype use_query_mode: bool + :vartype name: str + :ivar common_words: Required. The set of common words. + :vartype common_words: list[str] + :ivar ignore_case: A value indicating whether common words matching will be case insensitive. + Default is false. + :vartype ignore_case: bool + :ivar use_query_mode: A value that indicates whether the token filter is in query mode. When in + query mode, the token filter generates bigrams and then removes common words and single terms + followed by a common word. Default is false. + :vartype use_query_mode: bool """ _validation = { @@ -669,6 +791,21 @@ def __init__( use_query_mode: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword common_words: Required. The set of common words. + :paramtype common_words: list[str] + :keyword ignore_case: A value indicating whether common words matching will be case + insensitive. Default is false. + :paramtype ignore_case: bool + :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When + in query mode, the token filter generates bigrams and then removes common words and single + terms followed by a common word. Default is false. + :paramtype use_query_mode: bool + """ super(CommonGramTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' # type: str self.common_words = common_words @@ -684,26 +821,25 @@ class SearchIndexerSkill(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. 
Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. 
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] """ _validation = { @@ -735,6 +871,25 @@ def __init__( context: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + """ super(SearchIndexerSkill, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = name @@ -749,26 +904,25 @@ class ConditionalSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. 
- :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] """ _validation = { @@ -796,6 +950,25 @@ def __init__( context: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + """ super(ConditionalSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' # type: str @@ -805,14 +978,14 @@ class CorsOptions(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword allowed_origins: Required. The list of origins from which JavaScript code will be - granted access to your index. Can contain a list of hosts of the form + :ivar allowed_origins: Required. The list of origins from which JavaScript code will be granted + access to your index. 
Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). - :paramtype allowed_origins: list[str] - :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight + :vartype allowed_origins: list[str] + :ivar max_age_in_seconds: The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. - :paramtype max_age_in_seconds: long + :vartype max_age_in_seconds: long """ _validation = { @@ -831,6 +1004,16 @@ def __init__( max_age_in_seconds: Optional[int] = None, **kwargs ): + """ + :keyword allowed_origins: Required. The list of origins from which JavaScript code will be + granted access to your index. Can contain a list of hosts of the form + {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not + recommended). + :paramtype allowed_origins: list[str] + :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight + responses. Defaults to 5 minutes. + :paramtype max_age_in_seconds: long + """ super(CorsOptions, self).__init__(**kwargs) self.allowed_origins = allowed_origins self.max_age_in_seconds = max_age_in_seconds @@ -844,13 +1027,13 @@ class LexicalAnalyzer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str """ _validation = { @@ -873,6 +1056,12 @@ def __init__( name: str, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + """ super(LexicalAnalyzer, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] self.name = name @@ -883,27 +1072,27 @@ class CustomAnalyzer(LexicalAnalyzer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar tokenizer: Required. The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. Possible values include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". 
- :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :keyword token_filters: A list of token filters used to filter out or modify the tokens - generated by a tokenizer. For example, you can specify a lowercase filter that converts all - characters to lowercase. The filters are run in the order in which they are listed. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: A list of character filters used to prepare input text before it is + :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :ivar token_filters: A list of token filters used to filter out or modify the tokens generated + by a tokenizer. For example, you can specify a lowercase filter that converts all characters to + lowercase. The filters are run in the order in which they are listed. + :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :ivar char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -929,6 +1118,26 @@ def __init__( char_filters: Optional[List[Union[str, "CharFilterName"]]] = None, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a + sequence of tokens, such as breaking a sentence into words. 
Possible values include: "classic", + "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", + "standard_v2", "uax_url_email", "whitespace". + :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :keyword token_filters: A list of token filters used to filter out or modify the tokens + generated by a tokenizer. For example, you can specify a lowercase filter that converts all + characters to lowercase. The filters are run in the order in which they are listed. + :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :keyword char_filters: A list of character filters used to prepare input text before it is + processed by the tokenizer. For instance, they can replace certain characters or symbols. The + filters are run in the order in which they are listed. + :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + """ super(CustomAnalyzer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' # type: str self.tokenizer = tokenizer @@ -941,51 +1150,51 @@ class CustomEntity(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be + :ivar name: Required. The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. - :paramtype name: str - :keyword description: This field can be used as a passthrough for custom metadata about the + :vartype name: str + :ivar description: This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. 
- :paramtype description: str - :keyword type: This field can be used as a passthrough for custom metadata about the matched + :vartype description: str + :ivar type: This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. - :paramtype type: str - :keyword subtype: This field can be used as a passthrough for custom metadata about the matched + :vartype type: str + :ivar subtype: This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. - :paramtype subtype: str - :keyword id: This field can be used as a passthrough for custom metadata about the matched + :vartype subtype: str + :ivar id: This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. - :paramtype id: str - :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + :vartype id: str + :ivar case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of "Microsoft" could be: microsoft, microSoft, MICROSOFT. - :paramtype case_sensitive: bool - :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with - the entity name should be sensitive to accent. - :paramtype accent_sensitive: bool - :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number - of divergent characters that would still constitute a match with the entity name. The smallest + :vartype case_sensitive: bool + :ivar accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + entity name should be sensitive to accent. 
+ :vartype accent_sensitive: bool + :ivar fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number of + divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. - :paramtype fuzzy_edit_distance: int - :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It - be used to change the default value of all aliases caseSensitive values. - :paramtype default_case_sensitive: bool - :keyword default_accent_sensitive: Changes the default accent sensitivity value for this - entity. It be used to change the default value of all aliases accentSensitive values. - :paramtype default_accent_sensitive: bool - :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this + :vartype fuzzy_edit_distance: int + :ivar default_case_sensitive: Changes the default case sensitivity value for this entity. It be + used to change the default value of all aliases caseSensitive values. + :vartype default_case_sensitive: bool + :ivar default_accent_sensitive: Changes the default accent sensitivity value for this entity. + It be used to change the default value of all aliases accentSensitive values. + :vartype default_accent_sensitive: bool + :ivar default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. - :paramtype default_fuzzy_edit_distance: int - :keyword aliases: An array of complex objects that can be used to specify alternative spellings - or synonyms to the root entity name. 
- :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] + :vartype default_fuzzy_edit_distance: int + :ivar aliases: An array of complex objects that can be used to specify alternative spellings or + synonyms to the root entity name. + :vartype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] """ _validation = { @@ -1024,6 +1233,53 @@ def __init__( aliases: Optional[List["CustomEntityAlias"]] = None, **kwargs ): + """ + :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be + grouped by this name, and it should represent the "normalized" form of the text being found. + :paramtype name: str + :keyword description: This field can be used as a passthrough for custom metadata about the + matched text(s). The value of this field will appear with every match of its entity in the + skill output. + :paramtype description: str + :keyword type: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in the skill + output. + :paramtype type: str + :keyword subtype: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in the skill + output. + :paramtype subtype: str + :keyword id: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in the skill + output. + :paramtype id: str + :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + entity name should be sensitive to character casing. Sample case insensitive matches of + "Microsoft" could be: microsoft, microSoft, MICROSOFT. + :paramtype case_sensitive: bool + :keyword accent_sensitive: Defaults to false. 
Boolean value denoting whether comparisons with + the entity name should be sensitive to accent. + :paramtype accent_sensitive: bool + :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number + of divergent characters that would still constitute a match with the entity name. The smallest + possible fuzziness for any given match is returned. For instance, if the edit distance is set + to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case + sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but + otherwise do. + :paramtype fuzzy_edit_distance: int + :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It + be used to change the default value of all aliases caseSensitive values. + :paramtype default_case_sensitive: bool + :keyword default_accent_sensitive: Changes the default accent sensitivity value for this + entity. It be used to change the default value of all aliases accentSensitive values. + :paramtype default_accent_sensitive: bool + :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this + entity. It can be used to change the default value of all aliases fuzzyEditDistance values. + :paramtype default_fuzzy_edit_distance: int + :keyword aliases: An array of complex objects that can be used to specify alternative spellings + or synonyms to the root entity name. + :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] + """ super(CustomEntity, self).__init__(**kwargs) self.name = name self.description = description @@ -1044,14 +1300,14 @@ class CustomEntityAlias(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword text: Required. The text of the alias. - :paramtype text: str - :keyword case_sensitive: Determine if the alias is case sensitive. 
- :paramtype case_sensitive: bool - :keyword accent_sensitive: Determine if the alias is accent sensitive. - :paramtype accent_sensitive: bool - :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. - :paramtype fuzzy_edit_distance: int + :ivar text: Required. The text of the alias. + :vartype text: str + :ivar case_sensitive: Determine if the alias is case sensitive. + :vartype case_sensitive: bool + :ivar accent_sensitive: Determine if the alias is accent sensitive. + :vartype accent_sensitive: bool + :ivar fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. + :vartype fuzzy_edit_distance: int """ _validation = { @@ -1074,6 +1330,16 @@ def __init__( fuzzy_edit_distance: Optional[int] = None, **kwargs ): + """ + :keyword text: Required. The text of the alias. + :paramtype text: str + :keyword case_sensitive: Determine if the alias is case sensitive. + :paramtype case_sensitive: bool + :keyword accent_sensitive: Determine if the alias is accent sensitive. + :paramtype accent_sensitive: bool + :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. + :paramtype fuzzy_edit_distance: int + """ super(CustomEntityAlias, self).__init__(**kwargs) self.text = text self.case_sensitive = case_sensitive @@ -1086,47 +1352,45 @@ class CustomEntityLookupSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. 
- :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". 
- :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage - :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to + :ivar entities_definition_uri: Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. - :paramtype entities_definition_uri: str - :keyword inline_entities_definition: The inline CustomEntity definition. - :paramtype inline_entities_definition: - list[~azure.search.documents.indexes.models.CustomEntity] - :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is + :vartype entities_definition_uri: str + :ivar inline_entities_definition: The inline CustomEntity definition. + :vartype inline_entities_definition: list[~azure.search.documents.indexes.models.CustomEntity] + :ivar global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is not + set in CustomEntity, this value will be the default value. + :vartype global_default_case_sensitive: bool + :ivar global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. - :paramtype global_default_case_sensitive: bool - :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive - is not set in CustomEntity, this value will be the default value. - :paramtype global_default_accent_sensitive: bool - :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If + :vartype global_default_accent_sensitive: bool + :ivar global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. 
If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. - :paramtype global_default_fuzzy_edit_distance: int + :vartype global_default_fuzzy_edit_distance: int """ _validation = { @@ -1166,6 +1430,46 @@ def __init__( global_default_fuzzy_edit_distance: Optional[int] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage + :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to + match against. This entity definition is read at the beginning of an indexer run. Any updates + to this file during an indexer run will not take effect until subsequent runs. 
This config must + be accessible over HTTPS. + :paramtype entities_definition_uri: str + :keyword inline_entities_definition: The inline CustomEntity definition. + :paramtype inline_entities_definition: + list[~azure.search.documents.indexes.models.CustomEntity] + :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is + not set in CustomEntity, this value will be the default value. + :paramtype global_default_case_sensitive: bool + :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive + is not set in CustomEntity, this value will be the default value. + :paramtype global_default_accent_sensitive: bool + :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If + FuzzyEditDistance is not set in CustomEntity, this value will be the default value. + :paramtype global_default_fuzzy_edit_distance: int + """ super(CustomEntityLookupSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.CustomEntityLookupSkill' # type: str self.default_language_code = default_language_code @@ -1181,13 +1485,13 @@ class LexicalNormalizer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the normalizer. - :paramtype odata_type: str - :keyword name: Required. The name of the normalizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named - 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. - :paramtype name: str + :ivar odata_type: Required. Identifies the concrete type of the normalizer. + :vartype odata_type: str + :ivar name: Required. The name of the normalizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', + 'standard', 'lowercase', 'uppercase', or 'elision'. + :vartype name: str """ _validation = { @@ -1207,6 +1511,15 @@ def __init__( name: str, **kwargs ): + """ + :keyword odata_type: Required. Identifies the concrete type of the normalizer. + :paramtype odata_type: str + :keyword name: Required. The name of the normalizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named + 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. + :paramtype name: str + """ super(LexicalNormalizer, self).__init__(**kwargs) self.odata_type = odata_type self.name = name @@ -1217,21 +1530,21 @@ class CustomNormalizer(LexicalNormalizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the normalizer. - :paramtype odata_type: str - :keyword name: Required. The name of the normalizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named - 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. - :paramtype name: str - :keyword token_filters: A list of token filters used to filter out or modify the input token. - For example, you can specify a lowercase filter that converts all characters to lowercase. The + :ivar odata_type: Required. Identifies the concrete type of the normalizer. + :vartype odata_type: str + :ivar name: Required. The name of the normalizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', + 'standard', 'lowercase', 'uppercase', or 'elision'. + :vartype name: str + :ivar token_filters: A list of token filters used to filter out or modify the input token. For + example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: A list of character filters used to prepare input text before it is + :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :ivar char_filters: A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -1255,6 +1568,23 @@ def __init__( char_filters: Optional[List[Union[str, "CharFilterName"]]] = None, **kwargs ): + """ + :keyword odata_type: Required. Identifies the concrete type of the normalizer. + :paramtype odata_type: str + :keyword name: Required. The name of the normalizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named + 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. + :paramtype name: str + :keyword token_filters: A list of token filters used to filter out or modify the input token. 
+ For example, you can specify a lowercase filter that converts all characters to lowercase. The + filters are run in the order in which they are listed. + :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :keyword char_filters: A list of character filters used to prepare input text before it is + processed. For instance, they can replace certain characters or symbols. The filters are run in + the order in which they are listed. + :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + """ super(CustomNormalizer, self).__init__(odata_type=odata_type, name=name, **kwargs) self.token_filters = token_filters self.char_filters = char_filters @@ -1268,9 +1598,9 @@ class DataChangeDetectionPolicy(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data change detection + :ivar odata_type: Required. Identifies the concrete type of the data change detection policy.Constant filled by server. - :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -1289,6 +1619,8 @@ def __init__( self, **kwargs ): + """ + """ super(DataChangeDetectionPolicy, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -1301,9 +1633,9 @@ class DataDeletionDetectionPolicy(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data deletion detection + :ivar odata_type: Required. Identifies the concrete type of the data deletion detection policy.Constant filled by server. 
- :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -1322,6 +1654,8 @@ def __init__( self, **kwargs ): + """ + """ super(DataDeletionDetectionPolicy, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -1329,9 +1663,9 @@ def __init__( class DataSourceCredentials(msrest.serialization.Model): """Represents credentials that can be used to connect to a datasource. - :keyword connection_string: The connection string for the datasource. Set to - ':code:``' if you do not want the connection string updated. - :paramtype connection_string: str + :ivar connection_string: The connection string for the datasource. Set to ':code:``' + if you do not want the connection string updated. + :vartype connection_string: str """ _attribute_map = { @@ -1344,6 +1678,11 @@ def __init__( connection_string: Optional[str] = None, **kwargs ): + """ + :keyword connection_string: The connection string for the datasource. Set to + ':code:``' if you do not want the connection string updated. + :paramtype connection_string: str + """ super(DataSourceCredentials, self).__init__(**kwargs) self.connection_string = connection_string @@ -1353,11 +1692,11 @@ class DefaultCognitiveServicesAccount(CognitiveServicesAccount): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource + :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource attached to a skillset.Constant filled by server. - :paramtype odata_type: str - :keyword description: Description of the cognitive service resource attached to a skillset. - :paramtype description: str + :vartype odata_type: str + :ivar description: Description of the cognitive service resource attached to a skillset. 
+ :vartype description: str """ _validation = { @@ -1375,6 +1714,10 @@ def __init__( description: Optional[str] = None, **kwargs ): + """ + :keyword description: Description of the cognitive service resource attached to a skillset. + :paramtype description: str + """ super(DefaultCognitiveServicesAccount, self).__init__(description=description, **kwargs) self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' # type: str @@ -1384,27 +1727,27 @@ class DictionaryDecompounderTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword word_list: Required. The list of words to match against. - :paramtype word_list: list[str] - :keyword min_word_size: The minimum word size. Only words longer than this get processed. - Default is 5. Maximum is 300. - :paramtype min_word_size: int - :keyword min_subword_size: The minimum subword size. Only subwords longer than this are - outputted. Default is 2. Maximum is 300. - :paramtype min_subword_size: int - :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are + :vartype name: str + :ivar word_list: Required. The list of words to match against. + :vartype word_list: list[str] + :ivar min_word_size: The minimum word size. Only words longer than this get processed. Default + is 5. Maximum is 300. 
+ :vartype min_word_size: int + :ivar min_subword_size: The minimum subword size. Only subwords longer than this are outputted. + Default is 2. Maximum is 300. + :vartype min_subword_size: int + :ivar max_subword_size: The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. - :paramtype max_subword_size: int - :keyword only_longest_match: A value indicating whether to add only the longest matching - subword to the output. Default is false. - :paramtype only_longest_match: bool + :vartype max_subword_size: int + :ivar only_longest_match: A value indicating whether to add only the longest matching subword + to the output. Default is false. + :vartype only_longest_match: bool """ _validation = { @@ -1437,6 +1780,26 @@ def __init__( only_longest_match: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword word_list: Required. The list of words to match against. + :paramtype word_list: list[str] + :keyword min_word_size: The minimum word size. Only words longer than this get processed. + Default is 5. Maximum is 300. + :paramtype min_word_size: int + :keyword min_subword_size: The minimum subword size. Only subwords longer than this are + outputted. Default is 2. Maximum is 300. + :paramtype min_subword_size: int + :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are + outputted. Default is 15. Maximum is 300. + :paramtype max_subword_size: int + :keyword only_longest_match: A value indicating whether to add only the longest matching + subword to the output. Default is false. 
+ :paramtype only_longest_match: bool + """ super(DictionaryDecompounderTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' # type: str self.word_list = word_list @@ -1454,18 +1817,18 @@ class ScoringFunction(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation """ @@ -1494,6 +1857,18 @@ def __init__( interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. 
Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + """ super(ScoringFunction, self).__init__(**kwargs) self.type = None # type: Optional[str] self.field_name = field_name @@ -1506,21 +1881,21 @@ class DistanceScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the distance scoring function. 
- :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters + :ivar parameters: Required. Parameter values for the distance scoring function. + :vartype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters """ _validation = { @@ -1547,6 +1922,20 @@ def __init__( interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the distance scoring function. + :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters + """ super(DistanceScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) self.type = 'distance' # type: str self.parameters = parameters @@ -1557,12 +1946,12 @@ class DistanceScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword reference_point_parameter: Required. The name of the parameter passed in search - queries to specify the reference location. - :paramtype reference_point_parameter: str - :keyword boosting_distance: Required. The distance in kilometers from the reference location - where the boosting range ends. - :paramtype boosting_distance: float + :ivar reference_point_parameter: Required. The name of the parameter passed in search queries + to specify the reference location. 
+ :vartype reference_point_parameter: str + :ivar boosting_distance: Required. The distance in kilometers from the reference location where + the boosting range ends. + :vartype boosting_distance: float """ _validation = { @@ -1582,6 +1971,14 @@ def __init__( boosting_distance: float, **kwargs ): + """ + :keyword reference_point_parameter: Required. The name of the parameter passed in search + queries to specify the reference location. + :paramtype reference_point_parameter: str + :keyword boosting_distance: Required. The distance in kilometers from the reference location + where the boosting range ends. + :paramtype boosting_distance: float + """ super(DistanceScoringParameters, self).__init__(**kwargs) self.reference_point_parameter = reference_point_parameter self.boosting_distance = boosting_distance @@ -1592,33 +1989,32 @@ class DocumentExtractionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. 
Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. - :paramtype parsing_mode: str - :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. + :vartype parsing_mode: str + :ivar data_to_extract: The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. - :paramtype data_to_extract: str - :keyword configuration: A dictionary of configurations for the skill. - :paramtype configuration: dict[str, any] + :vartype data_to_extract: str + :ivar configuration: A dictionary of configurations for the skill. 
+ :vartype configuration: dict[str, any] """ _validation = { @@ -1652,6 +2048,32 @@ def __init__( configuration: Optional[Dict[str, Any]] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. + :paramtype parsing_mode: str + :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to + 'contentAndMetadata' if not defined. + :paramtype data_to_extract: str + :keyword configuration: A dictionary of configurations for the skill. 
+ :paramtype configuration: dict[str, any] + """ super(DocumentExtractionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Util.DocumentExtractionSkill' # type: str self.parsing_mode = parsing_mode @@ -1659,26 +2081,58 @@ def __init__( self.configuration = configuration +class DocumentKeysOrIds(msrest.serialization.Model): + """DocumentKeysOrIds. + + :ivar document_keys: document keys to be reset. + :vartype document_keys: list[str] + :ivar datasource_document_ids: datasource document identifiers to be reset. + :vartype datasource_document_ids: list[str] + """ + + _attribute_map = { + 'document_keys': {'key': 'documentKeys', 'type': '[str]'}, + 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'}, + } + + def __init__( + self, + *, + document_keys: Optional[List[str]] = None, + datasource_document_ids: Optional[List[str]] = None, + **kwargs + ): + """ + :keyword document_keys: document keys to be reset. + :paramtype document_keys: list[str] + :keyword datasource_document_ids: datasource document identifiers to be reset. + :paramtype datasource_document_ids: list[str] + """ + super(DocumentKeysOrIds, self).__init__(**kwargs) + self.document_keys = document_keys + self.datasource_document_ids = datasource_document_ids + + class EdgeNGramTokenFilter(TokenFilter): """Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. 
+ :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. - :paramtype max_gram: int - :keyword side: Specifies which side of the input the n-gram should be generated from. Default - is "front". Possible values include: "front", "back". - :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. + :vartype max_gram: int + :ivar side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: "front", "back". + :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1703,6 +2157,20 @@ def __init__( side: Optional[Union[str, "EdgeNGramTokenFilterSide"]] = None, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. + :paramtype max_gram: int + :keyword side: Specifies which side of the input the n-gram should be generated from. Default + is "front". Possible values include: "front", "back". 
+ :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + """ super(EdgeNGramTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' # type: str self.min_gram = min_gram @@ -1715,21 +2183,21 @@ class EdgeNGramTokenFilterV2(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword side: Specifies which side of the input the n-gram should be generated from. Default - is "front". Possible values include: "front", "back". - :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: "front", "back". 
+ :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1756,6 +2224,20 @@ def __init__( side: Optional[Union[str, "EdgeNGramTokenFilterSide"]] = None, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + :keyword side: Specifies which side of the input the n-gram should be generated from. Default + is "front". Possible values include: "front", "back". + :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide + """ super(EdgeNGramTokenFilterV2, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' # type: str self.min_gram = min_gram @@ -1768,20 +2250,20 @@ class EdgeNGramTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. 
- :paramtype max_gram: int - :keyword token_chars: Character classes to keep in the tokens. - :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar token_chars: Character classes to keep in the tokens. + :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] """ _validation = { @@ -1808,6 +2290,19 @@ def __init__( token_chars: Optional[List[Union[str, "TokenCharacterKind"]]] = None, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + :keyword token_chars: Character classes to keep in the tokens. + :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + """ super(EdgeNGramTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' # type: str self.min_gram = min_gram @@ -1820,15 +2315,15 @@ class ElisionTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. 
Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword articles: The set of articles to remove. - :paramtype articles: list[str] + :vartype name: str + :ivar articles: The set of articles to remove. + :vartype articles: list[str] """ _validation = { @@ -1849,6 +2344,14 @@ def __init__( articles: Optional[List[str]] = None, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword articles: The set of articles to remove. + :paramtype articles: list[str] + """ super(ElisionTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' # type: str self.articles = articles @@ -1859,36 +2362,35 @@ class EntityLinkingSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. 
A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. - :paramtype minimum_precision: float - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype minimum_precision: float + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -1923,6 +2425,35 @@ def __init__( model_version: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. 
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + :paramtype default_language_code: str + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(EntityLinkingSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.V3.EntityLinkingSkill' # type: str self.default_language_code = default_language_code @@ -1935,42 +2466,41 @@ class EntityRecognitionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. 
- :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword categories: A list of entity categories that should be extracted. - :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar categories: A list of entity categories that should be extracted. + :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage - :keyword include_typeless_entities: Determines whether or not to include entities which are - well known but don't conform to a pre-defined type. If this configuration is not set (default), - set to null or set to false, entities which don't conform to one of the pre-defined types will - not be surfaced. - :paramtype include_typeless_entities: bool - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :ivar include_typeless_entities: Determines whether or not to include entities which are well + known but don't conform to a pre-defined type. If this configuration is not set (default), set + to null or set to false, entities which don't conform to one of the pre-defined types will not + be surfaced. + :vartype include_typeless_entities: bool + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. - :paramtype minimum_precision: float + :vartype minimum_precision: float """ _validation = { @@ -2006,6 +2536,41 @@ def __init__( minimum_precision: Optional[float] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. 
A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword categories: A list of entity categories that should be extracted. + :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", + "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage + :keyword include_typeless_entities: Determines whether or not to include entities which are + well known but don't conform to a pre-defined type. If this configuration is not set (default), + set to null or set to false, entities which don't conform to one of the pre-defined types will + not be surfaced. 
+ :paramtype include_typeless_entities: bool + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + """ super(EntityRecognitionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' # type: str self.categories = categories @@ -2019,38 +2584,37 @@ class EntityRecognitionSkillV3(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. 
The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword categories: A list of entity categories that should be extracted. - :paramtype categories: list[str] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar categories: A list of entity categories that should be extracted. + :vartype categories: list[str] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. - :paramtype minimum_precision: float - :keyword model_version: The version of the model to use when calling the Text Analytics - service. 
It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype minimum_precision: float + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -2087,6 +2651,37 @@ def __init__( model_version: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword categories: A list of entity categories that should be extracted. + :paramtype categories: list[str] + :keyword default_language_code: A value indicating which language code to use. Default is en. 
+ :paramtype default_language_code: str + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(EntityRecognitionSkillV3, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.V3.EntityRecognitionSkill' # type: str self.categories = categories @@ -2100,13 +2695,13 @@ class FieldMapping(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword source_field_name: Required. The name of the field in the data source. - :paramtype source_field_name: str - :keyword target_field_name: The name of the target field in the index. Same as the source field + :ivar source_field_name: Required. The name of the field in the data source. + :vartype source_field_name: str + :ivar target_field_name: The name of the target field in the index. Same as the source field name by default. - :paramtype target_field_name: str - :keyword mapping_function: A function to apply to each source field value before indexing. - :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction + :vartype target_field_name: str + :ivar mapping_function: A function to apply to each source field value before indexing. 
+ :vartype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction """ _validation = { @@ -2127,6 +2722,15 @@ def __init__( mapping_function: Optional["FieldMappingFunction"] = None, **kwargs ): + """ + :keyword source_field_name: Required. The name of the field in the data source. + :paramtype source_field_name: str + :keyword target_field_name: The name of the target field in the index. Same as the source field + name by default. + :paramtype target_field_name: str + :keyword mapping_function: A function to apply to each source field value before indexing. + :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction + """ super(FieldMapping, self).__init__(**kwargs) self.source_field_name = source_field_name self.target_field_name = target_field_name @@ -2138,11 +2742,11 @@ class FieldMappingFunction(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the field mapping function. - :paramtype name: str - :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each + :ivar name: Required. The name of the field mapping function. + :vartype name: str + :ivar parameters: A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. - :paramtype parameters: dict[str, any] + :vartype parameters: dict[str, any] """ _validation = { @@ -2161,6 +2765,13 @@ def __init__( parameters: Optional[Dict[str, Any]] = None, **kwargs ): + """ + :keyword name: Required. The name of the field mapping function. + :paramtype name: str + :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each + value must be of a primitive type. 
+ :paramtype parameters: dict[str, any] + """ super(FieldMappingFunction, self).__init__(**kwargs) self.name = name self.parameters = parameters @@ -2171,21 +2782,21 @@ class FreshnessScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the freshness scoring function. - :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters + :ivar parameters: Required. Parameter values for the freshness scoring function. 
+ :vartype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters """ _validation = { @@ -2212,6 +2823,20 @@ def __init__( interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the freshness scoring function. + :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters + """ super(FreshnessScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) self.type = 'freshness' # type: str self.parameters = parameters @@ -2222,9 +2847,9 @@ class FreshnessScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword boosting_duration: Required. The expiration period after which boosting will stop for - a particular document. - :paramtype boosting_duration: ~datetime.timedelta + :ivar boosting_duration: Required. The expiration period after which boosting will stop for a + particular document. + :vartype boosting_duration: ~datetime.timedelta """ _validation = { @@ -2241,6 +2866,11 @@ def __init__( boosting_duration: datetime.timedelta, **kwargs ): + """ + :keyword boosting_duration: Required. The expiration period after which boosting will stop for + a particular document. 
+ :paramtype boosting_duration: ~datetime.timedelta + """ super(FreshnessScoringParameters, self).__init__(**kwargs) self.boosting_duration = boosting_duration @@ -2272,6 +2902,8 @@ def __init__( self, **kwargs ): + """ + """ super(GetIndexStatisticsResult, self).__init__(**kwargs) self.document_count = None self.storage_size = None @@ -2282,11 +2914,11 @@ class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data change detection + :ivar odata_type: Required. Identifies the concrete type of the data change detection policy.Constant filled by server. - :paramtype odata_type: str - :keyword high_water_mark_column_name: Required. The name of the high water mark column. - :paramtype high_water_mark_column_name: str + :vartype odata_type: str + :ivar high_water_mark_column_name: Required. The name of the high water mark column. + :vartype high_water_mark_column_name: str """ _validation = { @@ -2305,6 +2937,10 @@ def __init__( high_water_mark_column_name: str, **kwargs ): + """ + :keyword high_water_mark_column_name: Required. The name of the high water mark column. + :paramtype high_water_mark_column_name: str + """ super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' # type: str self.high_water_mark_column_name = high_water_mark_column_name @@ -2315,34 +2951,33 @@ class ImageAnalysisSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. 
A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "en", "es", "ja", "pt", "zh". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage - :keyword visual_features: A list of visual features. - :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] - :keyword details: A string indicating which domain-specific details to return. - :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] + :ivar visual_features: A list of visual features. + :vartype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] + :ivar details: A string indicating which domain-specific details to return. + :vartype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] """ _validation = { @@ -2376,6 +3011,33 @@ def __init__( details: Optional[List[Union[str, "ImageDetail"]]] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. 
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "en", "es", "ja", "pt", "zh". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage + :keyword visual_features: A list of visual features. + :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] + :keyword details: A string indicating which domain-specific details to return. + :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] + """ super(ImageAnalysisSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' # type: str self.default_language_code = default_language_code @@ -2437,6 +3099,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexerCurrentState, self).__init__(**kwargs) self.mode = None self.all_docs_initial_change_tracking_state = None @@ -2521,6 +3185,8 @@ def __init__( self, **kwargs ): + """ + """ super(IndexerExecutionResult, self).__init__(**kwargs) self.status = None self.status_detail = None @@ -2539,19 +3205,18 @@ def __init__( class IndexingParameters(msrest.serialization.Model): """Represents parameters for indexer execution. - :keyword batch_size: The number of items that are read from the data source and indexed as a + :ivar batch_size: The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. 
- :paramtype batch_size: int - :keyword max_failed_items: The maximum number of items that can fail indexing for indexer + :vartype batch_size: int + :ivar max_failed_items: The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. - :paramtype max_failed_items: int - :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can - fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. - :paramtype max_failed_items_per_batch: int - :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is + :vartype max_failed_items: int + :ivar max_failed_items_per_batch: The maximum number of items in a single batch that can fail + indexing for the batch to still be considered successful. -1 means no limit. Default is 0. + :vartype max_failed_items_per_batch: int + :ivar configuration: A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. - :paramtype configuration: - ~azure.search.documents.indexes.models.IndexingParametersConfiguration + :vartype configuration: ~azure.search.documents.indexes.models.IndexingParametersConfiguration """ _attribute_map = { @@ -2570,6 +3235,21 @@ def __init__( configuration: Optional["IndexingParametersConfiguration"] = None, **kwargs ): + """ + :keyword batch_size: The number of items that are read from the data source and indexed as a + single batch in order to improve performance. The default depends on the data source type. + :paramtype batch_size: int + :keyword max_failed_items: The maximum number of items that can fail indexing for indexer + execution to still be considered successful. -1 means no limit. Default is 0. 
+ :paramtype max_failed_items: int + :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can + fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. + :paramtype max_failed_items_per_batch: int + :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is + the name of a specific property. Each value must be of a primitive type. + :paramtype configuration: + ~azure.search.documents.indexes.models.IndexingParametersConfiguration + """ super(IndexingParameters, self).__init__(**kwargs) self.batch_size = batch_size self.max_failed_items = max_failed_items @@ -2580,73 +3260,73 @@ def __init__( class IndexingParametersConfiguration(msrest.serialization.Model): """A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. - :keyword additional_properties: Unmatched properties from the message are deserialized to this + :ivar additional_properties: Unmatched properties from the message are deserialized to this collection. - :paramtype additional_properties: dict[str, any] - :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. + :vartype additional_properties: dict[str, any] + :ivar parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines". Default value: "default". - :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode - :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore - when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip - over those files during indexing. 
- :paramtype excluded_file_name_extensions: str - :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select - when processing from Azure blob storage. For example, you could focus indexing on specific + :vartype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode + :ivar excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore when + processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over + those files during indexing. + :vartype excluded_file_name_extensions: str + :ivar indexed_file_name_extensions: Comma-delimited list of filename extensions to select when + processing from Azure blob storage. For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to specifically include those file types. - :paramtype indexed_file_name_extensions: str - :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to - continue indexing when an unsupported content type is encountered, and you don't know all the - content types (file extensions) in advance. - :paramtype fail_on_unsupported_content_type: bool - :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue + :vartype indexed_file_name_extensions: str + :ivar fail_on_unsupported_content_type: For Azure blobs, set to false if you want to continue + indexing when an unsupported content type is encountered, and you don't know all the content + types (file extensions) in advance. + :vartype fail_on_unsupported_content_type: bool + :ivar fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue indexing if a document fails indexing. - :paramtype fail_on_unprocessable_document: bool - :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this - property to true to still index storage metadata for blob content that is too large to process. 
+ :vartype fail_on_unprocessable_document: bool + :ivar index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this property + to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. - :paramtype index_storage_metadata_only_for_oversized_documents: bool - :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column + :vartype index_storage_metadata_only_for_oversized_documents: bool + :ivar delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. - :paramtype delimited_text_headers: str - :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character + :vartype delimited_text_headers: str + :ivar delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, "|"). - :paramtype delimited_text_delimiter: str - :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line - of each blob contains headers. - :paramtype first_line_contains_headers: bool - :keyword document_root: For JSON arrays, given a structured or semi-structured document, you - can specify a path to the array using this property. - :paramtype document_root: str - :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the + :vartype delimited_text_delimiter: str + :ivar first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line of + each blob contains headers. + :vartype first_line_contains_headers: bool + :ivar document_root: For JSON arrays, given a structured or semi-structured document, you can + specify a path to the array using this property. 
+ :vartype document_root: str + :ivar data_to_extract: Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata", "allMetadata", "contentAndMetadata". Default value: "contentAndMetadata". - :paramtype data_to_extract: str or + :vartype data_to_extract: str or ~azure.search.documents.indexes.models.BlobIndexerDataToExtract - :keyword image_action: Determines how to process embedded images and image files in Azure blob + :ivar image_action: Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. Possible values include: "none", "generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none". - :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction - :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data - that is an object representing the original file data downloaded from your blob data source. - This allows you to pass the original file data to a custom skill for processing within the + :vartype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction + :ivar allow_skillset_to_read_file_data: If true, will create a path //document//file_data that + is an object representing the original file data downloaded from your blob data source. This + allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. 
- :paramtype allow_skillset_to_read_file_data: bool - :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files - in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none". - :paramtype pdf_text_rotation_algorithm: str or + :vartype allow_skillset_to_read_file_data: bool + :ivar pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files in + Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none". + :vartype pdf_text_rotation_algorithm: str or ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm - :keyword execution_environment: Specifies the environment in which the indexer should execute. + :ivar execution_environment: Specifies the environment in which the indexer should execute. Possible values include: "standard", "private". Default value: "standard". - :paramtype execution_environment: str or + :vartype execution_environment: str or ~azure.search.documents.indexes.models.IndexerExecutionEnvironment - :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL - database data sources, specified in the format "hh:mm:ss". - :paramtype query_timeout: str + :ivar query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL database + data sources, specified in the format "hh:mm:ss". + :vartype query_timeout: str """ _attribute_map = { @@ -2691,6 +3371,75 @@ def __init__( query_timeout: Optional[str] = "00:05:00", **kwargs ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, any] + :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. + Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines". + Default value: "default". 
+ :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode + :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore + when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip + over those files during indexing. + :paramtype excluded_file_name_extensions: str + :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select + when processing from Azure blob storage. For example, you could focus indexing on specific + application files ".docx, .pptx, .msg" to specifically include those file types. + :paramtype indexed_file_name_extensions: str + :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to + continue indexing when an unsupported content type is encountered, and you don't know all the + content types (file extensions) in advance. + :paramtype fail_on_unsupported_content_type: bool + :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue + indexing if a document fails indexing. + :paramtype fail_on_unprocessable_document: bool + :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this + property to true to still index storage metadata for blob content that is too large to process. + Oversized blobs are treated as errors by default. For limits on blob size, see + https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. + :paramtype index_storage_metadata_only_for_oversized_documents: bool + :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column + headers, useful for mapping source fields to destination fields in an index. + :paramtype delimited_text_headers: str + :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character + delimiter for CSV files where each line starts a new document (for example, "|"). 
+ :paramtype delimited_text_delimiter: str + :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line + of each blob contains headers. + :paramtype first_line_contains_headers: bool + :keyword document_root: For JSON arrays, given a structured or semi-structured document, you + can specify a path to the array using this property. + :paramtype document_root: str + :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the + indexer which data to extract from image content when "imageAction" is set to a value other + than "none". This applies to embedded image content in a .PDF or other application, or image + files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata", + "allMetadata", "contentAndMetadata". Default value: "contentAndMetadata". + :paramtype data_to_extract: str or + ~azure.search.documents.indexes.models.BlobIndexerDataToExtract + :keyword image_action: Determines how to process embedded images and image files in Azure blob + storage. Setting the "imageAction" configuration to any value other than "none" requires that + a skillset also be attached to that indexer. Possible values include: "none", + "generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none". + :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction + :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data + that is an object representing the original file data downloaded from your blob data source. + This allows you to pass the original file data to a custom skill for processing within the + enrichment pipeline, or to the Document Extraction skill. + :paramtype allow_skillset_to_read_file_data: bool + :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files + in Azure blob storage. Possible values include: "none", "detectAngles". 
Default value: "none". + :paramtype pdf_text_rotation_algorithm: str or + ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm + :keyword execution_environment: Specifies the environment in which the indexer should execute. + Possible values include: "standard", "private". Default value: "standard". + :paramtype execution_environment: str or + ~azure.search.documents.indexes.models.IndexerExecutionEnvironment + :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL + database data sources, specified in the format "hh:mm:ss". + :paramtype query_timeout: str + """ super(IndexingParametersConfiguration, self).__init__(**kwargs) self.additional_properties = additional_properties self.parsing_mode = parsing_mode @@ -2716,10 +3465,10 @@ class IndexingSchedule(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword interval: Required. The interval of time between indexer executions. - :paramtype interval: ~datetime.timedelta - :keyword start_time: The time when an indexer should start running. - :paramtype start_time: ~datetime.datetime + :ivar interval: Required. The interval of time between indexer executions. + :vartype interval: ~datetime.timedelta + :ivar start_time: The time when an indexer should start running. + :vartype start_time: ~datetime.datetime """ _validation = { @@ -2738,6 +3487,12 @@ def __init__( start_time: Optional[datetime.datetime] = None, **kwargs ): + """ + :keyword interval: Required. The interval of time between indexer executions. + :paramtype interval: ~datetime.timedelta + :keyword start_time: The time when an indexer should start running. 
+ :paramtype start_time: ~datetime.datetime + """ super(IndexingSchedule, self).__init__(**kwargs) self.interval = interval self.start_time = start_time @@ -2748,14 +3503,14 @@ class InputFieldMappingEntry(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the input. - :paramtype name: str - :keyword source: The source of the input. - :paramtype source: str - :keyword source_context: The source context used for selecting recursive inputs. - :paramtype source_context: str - :keyword inputs: The recursive inputs used when creating a complex type. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar name: Required. The name of the input. + :vartype name: str + :ivar source: The source of the input. + :vartype source: str + :ivar source_context: The source context used for selecting recursive inputs. + :vartype source_context: str + :ivar inputs: The recursive inputs used when creating a complex type. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] """ _validation = { @@ -2778,6 +3533,16 @@ def __init__( inputs: Optional[List["InputFieldMappingEntry"]] = None, **kwargs ): + """ + :keyword name: Required. The name of the input. + :paramtype name: str + :keyword source: The source of the input. + :paramtype source: str + :keyword source_context: The source context used for selecting recursive inputs. + :paramtype source_context: str + :keyword inputs: The recursive inputs used when creating a complex type. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + """ super(InputFieldMappingEntry, self).__init__(**kwargs) self.name = name self.source = source @@ -2790,18 +3555,18 @@ class KeepTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. 
Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword keep_words: Required. The list of words to keep. - :paramtype keep_words: list[str] - :keyword lower_case_keep_words: A value indicating whether to lower case all words first. - Default is false. - :paramtype lower_case_keep_words: bool + :vartype name: str + :ivar keep_words: Required. The list of words to keep. + :vartype keep_words: list[str] + :ivar lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. + :vartype lower_case_keep_words: bool """ _validation = { @@ -2825,6 +3590,17 @@ def __init__( lower_case_keep_words: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword keep_words: Required. The list of words to keep. + :paramtype keep_words: list[str] + :keyword lower_case_keep_words: A value indicating whether to lower case all words first. + Default is false. + :paramtype lower_case_keep_words: bool + """ super(KeepTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' # type: str self.keep_words = keep_words @@ -2836,38 +3612,37 @@ class KeyPhraseExtractionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. 
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. 
Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage - :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent, - all identified key phrases will be returned. - :paramtype max_key_phrase_count: int - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :ivar max_key_phrase_count: A number indicating how many key phrases to return. If absent, all + identified key phrases will be returned. + :vartype max_key_phrase_count: int + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -2901,6 +3676,37 @@ def __init__( model_version: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. 
A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", + "pt-PT", "pt-BR", "ru", "es", "sv". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage + :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent, + all identified key phrases will be returned. + :paramtype max_key_phrase_count: int + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. 
+ :paramtype model_version: str + """ super(KeyPhraseExtractionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' # type: str self.default_language_code = default_language_code @@ -2913,18 +3719,18 @@ class KeywordMarkerTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword keywords: Required. A list of words to mark as keywords. - :paramtype keywords: list[str] - :keyword ignore_case: A value indicating whether to ignore case. If true, all words are - converted to lower case first. Default is false. - :paramtype ignore_case: bool + :vartype name: str + :ivar keywords: Required. A list of words to mark as keywords. + :vartype keywords: list[str] + :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted + to lower case first. Default is false. + :vartype ignore_case: bool """ _validation = { @@ -2948,6 +3754,17 @@ def __init__( ignore_case: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword keywords: Required. 
A list of words to mark as keywords. + :paramtype keywords: list[str] + :keyword ignore_case: A value indicating whether to ignore case. If true, all words are + converted to lower case first. Default is false. + :paramtype ignore_case: bool + """ super(KeywordMarkerTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' # type: str self.keywords = keywords @@ -2959,15 +3776,15 @@ class KeywordTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword buffer_size: The read buffer size in bytes. Default is 256. - :paramtype buffer_size: int + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar buffer_size: The read buffer size in bytes. Default is 256. + :vartype buffer_size: int """ _validation = { @@ -2988,6 +3805,14 @@ def __init__( buffer_size: Optional[int] = 256, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword buffer_size: The read buffer size in bytes. Default is 256. 
+ :paramtype buffer_size: int + """ super(KeywordTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' # type: str self.buffer_size = buffer_size @@ -2998,16 +3823,16 @@ class KeywordTokenizerV2(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -3029,6 +3854,15 @@ def __init__( max_token_length: Optional[int] = 256, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. 
+ :paramtype max_token_length: int + """ super(KeywordTokenizerV2, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' # type: str self.max_token_length = max_token_length @@ -3039,33 +3873,32 @@ class LanguageDetectionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_country_hint: A country code to use as a hint to the language detection model - if it cannot disambiguate the language. 
- :paramtype default_country_hint: str - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_country_hint: A country code to use as a hint to the language detection model if + it cannot disambiguate the language. + :vartype default_country_hint: str + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -3097,6 +3930,32 @@ def __init__( model_version: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. 
+ :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_country_hint: A country code to use as a hint to the language detection model + if it cannot disambiguate the language. + :paramtype default_country_hint: str + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(LanguageDetectionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' # type: str self.default_country_hint = default_country_hint @@ -3108,18 +3967,18 @@ class LengthTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. 
Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be - less than the value of max. - :paramtype min_length: int - :keyword max_length: The maximum length in characters. Default and maximum is 300. - :paramtype max_length: int + :vartype name: str + :ivar min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max. + :vartype min_length: int + :ivar max_length: The maximum length in characters. Default and maximum is 300. + :vartype max_length: int """ _validation = { @@ -3144,6 +4003,17 @@ def __init__( max_length: Optional[int] = 300, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be + less than the value of max. + :paramtype min_length: int + :keyword max_length: The maximum length in characters. Default and maximum is 300. + :paramtype max_length: int + """ super(LengthTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' # type: str self.min_length = min_length @@ -3155,18 +4025,18 @@ class LimitTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. 
It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword max_token_count: The maximum number of tokens to produce. Default is 1. - :paramtype max_token_count: int - :keyword consume_all_tokens: A value indicating whether all tokens from the input must be - consumed even if maxTokenCount is reached. Default is false. - :paramtype consume_all_tokens: bool + :vartype name: str + :ivar max_token_count: The maximum number of tokens to produce. Default is 1. + :vartype max_token_count: int + :ivar consume_all_tokens: A value indicating whether all tokens from the input must be consumed + even if maxTokenCount is reached. Default is false. + :vartype consume_all_tokens: bool """ _validation = { @@ -3189,6 +4059,17 @@ def __init__( consume_all_tokens: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_count: The maximum number of tokens to produce. Default is 1. + :paramtype max_token_count: int + :keyword consume_all_tokens: A value indicating whether all tokens from the input must be + consumed even if maxTokenCount is reached. Default is false. 
+ :paramtype consume_all_tokens: bool + """ super(LimitTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' # type: str self.max_token_count = max_token_count @@ -3218,6 +4099,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListDataSourcesResult, self).__init__(**kwargs) self.data_sources = None @@ -3245,6 +4128,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListIndexersResult, self).__init__(**kwargs) self.indexers = None @@ -3272,6 +4157,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListIndexesResult, self).__init__(**kwargs) self.indexes = None @@ -3299,6 +4186,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListSkillsetsResult, self).__init__(**kwargs) self.skillsets = None @@ -3326,6 +4215,8 @@ def __init__( self, **kwargs ): + """ + """ super(ListSynonymMapsResult, self).__init__(**kwargs) self.synonym_maps = None @@ -3335,18 +4226,18 @@ class LuceneStandardAnalyzer(LexicalAnalyzer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. 
The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] + :vartype max_token_length: int + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] """ _validation = { @@ -3370,6 +4261,17 @@ def __init__( stopwords: Optional[List[str]] = None, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + :keyword stopwords: A list of stopwords. + :paramtype stopwords: list[str] + """ super(LuceneStandardAnalyzer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' # type: str self.max_token_length = max_token_length @@ -3381,16 +4283,16 @@ class LuceneStandardTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -3411,6 +4313,15 @@ def __init__( max_token_length: Optional[int] = 255, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. + :paramtype max_token_length: int + """ super(LuceneStandardTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' # type: str self.max_token_length = max_token_length @@ -3421,16 +4332,16 @@ class LuceneStandardTokenizerV2(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -3452,6 +4363,15 @@ def __init__( max_token_length: Optional[int] = 255, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + """ super(LuceneStandardTokenizerV2, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' # type: str self.max_token_length = max_token_length @@ -3462,21 +4382,21 @@ class MagnitudeScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. 
- :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the magnitude scoring function. - :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters + :ivar parameters: Required. Parameter values for the magnitude scoring function. + :vartype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters """ _validation = { @@ -3503,6 +4423,20 @@ def __init__( interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the magnitude scoring function. 
+ :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters + """ super(MagnitudeScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) self.type = 'magnitude' # type: str self.parameters = parameters @@ -3513,13 +4447,13 @@ class MagnitudeScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword boosting_range_start: Required. The field value at which boosting starts. - :paramtype boosting_range_start: float - :keyword boosting_range_end: Required. The field value at which boosting ends. - :paramtype boosting_range_end: float - :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant + :ivar boosting_range_start: Required. The field value at which boosting starts. + :vartype boosting_range_start: float + :ivar boosting_range_end: Required. The field value at which boosting ends. + :vartype boosting_range_end: float + :ivar should_boost_beyond_range_by_constant: A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. - :paramtype should_boost_beyond_range_by_constant: bool + :vartype should_boost_beyond_range_by_constant: bool """ _validation = { @@ -3541,6 +4475,15 @@ def __init__( should_boost_beyond_range_by_constant: Optional[bool] = None, **kwargs ): + """ + :keyword boosting_range_start: Required. The field value at which boosting starts. + :paramtype boosting_range_start: float + :keyword boosting_range_end: Required. The field value at which boosting ends. + :paramtype boosting_range_end: float + :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant + boost for field values beyond the range end value; default is false. 
+ :paramtype should_boost_beyond_range_by_constant: bool + """ super(MagnitudeScoringParameters, self).__init__(**kwargs) self.boosting_range_start = boosting_range_start self.boosting_range_end = boosting_range_end @@ -3552,16 +4495,16 @@ class MappingCharFilter(CharFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the char filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all - occurrences of the character "a" will be replaced with character "b"). - :paramtype mappings: list[str] + :vartype name: str + :ivar mappings: Required. A list of mappings of the following format: "a=>b" (all occurrences + of the character "a" will be replaced with character "b"). + :vartype mappings: list[str] """ _validation = { @@ -3583,6 +4526,15 @@ def __init__( mappings: List[str], **kwargs ): + """ + :keyword name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all + occurrences of the character "a" will be replaced with character "b"). 
+ :paramtype mappings: list[str] + """ super(MappingCharFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' # type: str self.mappings = mappings @@ -3593,32 +4545,31 @@ class MergeSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is - an empty space. - :paramtype insert_pre_tag: str - :keyword insert_post_tag: The tag indicates the end of the merged text. 
By default, the tag is - an empty space. - :paramtype insert_post_tag: str + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an + empty space. + :vartype insert_pre_tag: str + :ivar insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an + empty space. + :vartype insert_post_tag: str """ _validation = { @@ -3650,6 +4601,31 @@ def __init__( insert_post_tag: Optional[str] = " ", **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. 
Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is + an empty space. + :paramtype insert_pre_tag: str + :keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is + an empty space. + :paramtype insert_post_tag: str + """ super(MergeSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.MergeSkill' # type: str self.insert_pre_tag = insert_pre_tag @@ -3661,29 +4637,29 @@ class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. 
Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. - :paramtype max_token_length: int - :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used - as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :paramtype is_search_tokenizer: bool - :keyword language: The language to use. The default is English. Possible values include: - "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + :vartype max_token_length: int + :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as + the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :vartype is_search_tokenizer: bool + :ivar language: The language to use. The default is English. Possible values include: "arabic", + "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". - :paramtype language: str or + :vartype language: str or ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage """ @@ -3710,6 +4686,29 @@ def __init__( language: Optional[Union[str, "MicrosoftStemmingTokenizerLanguage"]] = None, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. 
It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + split. Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those tokens is split + based on the max token length set. Default is 255. + :paramtype max_token_length: int + :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used + as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :paramtype is_search_tokenizer: bool + :keyword language: The language to use. The default is English. Possible values include: + "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", + "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", + "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". + :paramtype language: str or + ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage + """ super(MicrosoftLanguageStemmingTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' # type: str self.max_token_length = max_token_length @@ -3722,29 +4721,29 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. 
Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. - :paramtype max_token_length: int - :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used - as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :paramtype is_search_tokenizer: bool - :keyword language: The language to use. The default is English. Possible values include: - "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", - "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", - "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", - "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", - "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", - "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". 
- :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage + :vartype max_token_length: int + :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as + the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :vartype is_search_tokenizer: bool + :ivar language: The language to use. The default is English. Possible values include: "bangla", + "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech", + "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic", + "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi", + "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian", + "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil", + "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :vartype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage """ _validation = { @@ -3770,6 +4769,28 @@ def __init__( language: Optional[Union[str, "MicrosoftTokenizerLanguage"]] = None, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are + split. Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those tokens is split + based on the max token length set. Default is 255. + :paramtype max_token_length: int + :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used + as the search tokenizer, set to false if used as the indexing tokenizer. 
Default is false. + :paramtype is_search_tokenizer: bool + :keyword language: The language to use. The default is English. Possible values include: + "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", + "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", + "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", + "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage + """ super(MicrosoftLanguageTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' # type: str self.max_token_length = max_token_length @@ -3782,18 +4803,18 @@ class NGramTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. 
Default is 2. - :paramtype max_gram: int + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. + :vartype max_gram: int """ _validation = { @@ -3816,6 +4837,17 @@ def __init__( max_gram: Optional[int] = 2, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. + :paramtype max_gram: int + """ super(NGramTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' # type: str self.min_gram = min_gram @@ -3827,18 +4859,18 @@ class NGramTokenFilterV2(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. 
Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int """ _validation = { @@ -3863,6 +4895,17 @@ def __init__( max_gram: Optional[int] = 2, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + """ super(NGramTokenFilterV2, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' # type: str self.min_gram = min_gram @@ -3874,20 +4917,20 @@ class NGramTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword token_chars: Character classes to keep in the tokens. - :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :vartype odata_type: str + :ivar name: Required. 
The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar token_chars: Character classes to keep in the tokens. + :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] """ _validation = { @@ -3914,6 +4957,19 @@ def __init__( token_chars: Optional[List[Union[str, "TokenCharacterKind"]]] = None, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than + the value of maxGram. + :paramtype min_gram: int + :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :paramtype max_gram: int + :keyword token_chars: Character classes to keep in the tokens. + :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + """ super(NGramTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' # type: str self.min_gram = min_gram @@ -3926,39 +4982,37 @@ class OcrSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. 
- :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. 
The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", "sr-Latn", "sk". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.OcrSkillLanguage - :keyword should_detect_orientation: A value indicating to turn orientation detection on or not. + :vartype default_language_code: str or ~azure.search.documents.indexes.models.OcrSkillLanguage + :ivar should_detect_orientation: A value indicating to turn orientation detection on or not. Default is false. - :paramtype should_detect_orientation: bool - :keyword line_ending: Defines the sequence of characters to use between the lines of text + :vartype should_detect_orientation: bool + :ivar line_ending: Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". Possible values include: "space", "carriageReturn", "lineFeed", "carriageReturnLineFeed". - :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding + :vartype line_ending: str or ~azure.search.documents.indexes.models.LineEnding """ _validation = { @@ -3992,6 +5046,38 @@ def __init__( line_ending: Optional[Union[str, "LineEnding"]] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. 
+ :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", + "sr-Latn", "sk". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.OcrSkillLanguage + :keyword should_detect_orientation: A value indicating to turn orientation detection on or not. + Default is false. + :paramtype should_detect_orientation: bool + :keyword line_ending: Defines the sequence of characters to use between the lines of text + recognized by the OCR skill. The default value is "space". Possible values include: "space", + "carriageReturn", "lineFeed", "carriageReturnLineFeed". 
+ :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding + """ super(OcrSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' # type: str self.default_language_code = default_language_code @@ -4004,10 +5090,10 @@ class OutputFieldMappingEntry(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the output defined by the skill. - :paramtype name: str - :keyword target_name: The target name of the output. It is optional and default to name. - :paramtype target_name: str + :ivar name: Required. The name of the output defined by the skill. + :vartype name: str + :ivar target_name: The target name of the output. It is optional and default to name. + :vartype target_name: str """ _validation = { @@ -4026,6 +5112,12 @@ def __init__( target_name: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. The name of the output defined by the skill. + :paramtype name: str + :keyword target_name: The target name of the output. It is optional and default to name. + :paramtype target_name: str + """ super(OutputFieldMappingEntry, self).__init__(**kwargs) self.name = name self.target_name = target_name @@ -4036,24 +5128,24 @@ class PathHierarchyTokenizerV2(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. 
- :paramtype name: str - :keyword delimiter: The delimiter character to use. Default is "/". - :paramtype delimiter: str - :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/". - :paramtype replacement: str - :keyword max_token_length: The maximum token length. Default and maximum is 300. - :paramtype max_token_length: int - :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order. + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar delimiter: The delimiter character to use. Default is "/". + :vartype delimiter: str + :ivar replacement: A value that, if set, replaces the delimiter character. Default is "/". + :vartype replacement: str + :ivar max_token_length: The maximum token length. Default and maximum is 300. + :vartype max_token_length: int + :ivar reverse_token_order: A value indicating whether to generate tokens in reverse order. Default is false. - :paramtype reverse_token_order: bool - :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. - :paramtype number_of_tokens_to_skip: int + :vartype reverse_token_order: bool + :ivar number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. + :vartype number_of_tokens_to_skip: int """ _validation = { @@ -4083,6 +5175,23 @@ def __init__( number_of_tokens_to_skip: Optional[int] = 0, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword delimiter: The delimiter character to use. Default is "/". 
+ :paramtype delimiter: str + :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/". + :paramtype replacement: str + :keyword max_token_length: The maximum token length. Default and maximum is 300. + :paramtype max_token_length: int + :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order. + Default is false. + :paramtype reverse_token_order: bool + :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. + :paramtype number_of_tokens_to_skip: int + """ super(PathHierarchyTokenizerV2, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' # type: str self.delimiter = delimiter @@ -4092,55 +5201,29 @@ def __init__( self.number_of_tokens_to_skip = number_of_tokens_to_skip -class Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model): - """Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema. - - :keyword document_keys: document keys to be reset. - :paramtype document_keys: list[str] - :keyword datasource_document_ids: datasource document identifiers to be reset. - :paramtype datasource_document_ids: list[str] - """ - - _attribute_map = { - 'document_keys': {'key': 'documentKeys', 'type': '[str]'}, - 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'}, - } - - def __init__( - self, - *, - document_keys: Optional[List[str]] = None, - datasource_document_ids: Optional[List[str]] = None, - **kwargs - ): - super(Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs) - self.document_keys = document_keys - self.datasource_document_ids = datasource_document_ids - - class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. 
All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is true. - :paramtype lower_case_terms: bool - :keyword pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more non-word characters. - :paramtype pattern: str - :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", - "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". - :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] + :vartype lower_case_terms: bool + :ivar pattern: A regular expression pattern to match token separators. Default is an expression + that matches one or more non-word characters. + :vartype pattern: str + :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". 
+ :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] """ _validation = { @@ -4167,6 +5250,23 @@ def __init__( stopwords: Optional[List[str]] = None, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is + true. + :paramtype lower_case_terms: bool + :keyword pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters. + :paramtype pattern: str + :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :keyword stopwords: A list of stopwords. + :paramtype stopwords: list[str] + """ super(PatternAnalyzer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' # type: str self.lower_case_terms = lower_case_terms @@ -4180,18 +5280,18 @@ class PatternCaptureTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. 
It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword patterns: Required. A list of patterns to match against each token. - :paramtype patterns: list[str] - :keyword preserve_original: A value indicating whether to return the original token even if one - of the patterns matches. Default is true. - :paramtype preserve_original: bool + :vartype name: str + :ivar patterns: Required. A list of patterns to match against each token. + :vartype patterns: list[str] + :ivar preserve_original: A value indicating whether to return the original token even if one of + the patterns matches. Default is true. + :vartype preserve_original: bool """ _validation = { @@ -4215,6 +5315,17 @@ def __init__( preserve_original: Optional[bool] = True, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword patterns: Required. A list of patterns to match against each token. + :paramtype patterns: list[str] + :keyword preserve_original: A value indicating whether to return the original token even if one + of the patterns matches. Default is true. + :paramtype preserve_original: bool + """ super(PatternCaptureTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' # type: str self.patterns = patterns @@ -4226,17 +5337,17 @@ class PatternReplaceCharFilter(CharFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the char filter. 
It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword pattern: Required. A regular expression pattern. - :paramtype pattern: str - :keyword replacement: Required. The replacement text. - :paramtype replacement: str + :vartype name: str + :ivar pattern: Required. A regular expression pattern. + :vartype pattern: str + :ivar replacement: Required. The replacement text. + :vartype replacement: str """ _validation = { @@ -4261,6 +5372,16 @@ def __init__( replacement: str, **kwargs ): + """ + :keyword name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword pattern: Required. A regular expression pattern. + :paramtype pattern: str + :keyword replacement: Required. The replacement text. + :paramtype replacement: str + """ super(PatternReplaceCharFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' # type: str self.pattern = pattern @@ -4272,17 +5393,17 @@ class PatternReplaceTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. 
The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword pattern: Required. A regular expression pattern. - :paramtype pattern: str - :keyword replacement: Required. The replacement text. - :paramtype replacement: str + :vartype name: str + :ivar pattern: Required. A regular expression pattern. + :vartype pattern: str + :ivar replacement: Required. The replacement text. + :vartype replacement: str """ _validation = { @@ -4307,6 +5428,16 @@ def __init__( replacement: str, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword pattern: Required. A regular expression pattern. + :paramtype pattern: str + :keyword replacement: Required. The replacement text. + :paramtype replacement: str + """ super(PatternReplaceTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' # type: str self.pattern = pattern @@ -4318,23 +5449,23 @@ class PatternTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more non-word characters. 
- :paramtype pattern: str - :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", - "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". - :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :keyword group: The zero-based ordinal of the matching group in the regular expression pattern - to extract into tokens. Use -1 if you want to use the entire pattern to split the input into + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar pattern: A regular expression pattern to match token separators. Default is an expression + that matches one or more non-word characters. + :vartype pattern: str + :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :ivar group: The zero-based ordinal of the matching group in the regular expression pattern to + extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. - :paramtype group: int + :vartype group: int """ _validation = { @@ -4359,6 +5490,22 @@ def __init__( group: Optional[int] = -1, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters. 
+ :paramtype pattern: str + :keyword flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags + :keyword group: The zero-based ordinal of the matching group in the regular expression pattern + to extract into tokens. Use -1 if you want to use the entire pattern to split the input into + tokens, irrespective of matching groups. Default is -1. + :paramtype group: int + """ super(PatternTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' # type: str self.pattern = pattern @@ -4371,20 +5518,20 @@ class PhoneticTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: + :vartype name: str + :ivar encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse". 
- :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder - :keyword replace_original_tokens: A value indicating whether encoded tokens should replace + :vartype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder + :ivar replace_original_tokens: A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. - :paramtype replace_original_tokens: bool + :vartype replace_original_tokens: bool """ _validation = { @@ -4407,6 +5554,19 @@ def __init__( replace_original_tokens: Optional[bool] = True, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: + "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", + "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse". + :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder + :keyword replace_original_tokens: A value indicating whether encoded tokens should replace + original tokens. If false, encoded tokens are added as synonyms. Default is true. + :paramtype replace_original_tokens: bool + """ super(PhoneticTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' # type: str self.encoder = encoder @@ -4418,48 +5578,47 @@ class PIIDetectionSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. 
- :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. 
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. - :paramtype minimum_precision: float - :keyword masking_mode: A parameter that provides various ways to mask the personal information + :vartype minimum_precision: float + :ivar masking_mode: A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. Possible values include: "none", "replace". - :paramtype masking_mode: str or + :vartype masking_mode: str or ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode - :keyword masking_character: The character used to mask the text if the maskingMode parameter is + :ivar masking_character: The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. - :paramtype masking_character: str - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str - :keyword pii_categories: A list of PII entity categories that should be extracted and masked. 
- :paramtype pii_categories: list[str] - :keyword domain: If specified, will set the PII domain to include only a subset of the entity + :vartype masking_character: str + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str + :ivar pii_categories: A list of PII entity categories that should be extracted and masked. + :vartype pii_categories: list[str] + :ivar domain: If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. - :paramtype domain: str + :vartype domain: str """ _validation = { @@ -4503,6 +5662,47 @@ def __init__( domain: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. 
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + :paramtype default_language_code: str + :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. + :paramtype minimum_precision: float + :keyword masking_mode: A parameter that provides various ways to mask the personal information + detected in the input text. Default is 'none'. Possible values include: "none", "replace". + :paramtype masking_mode: str or + ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode + :keyword masking_character: The character used to mask the text if the maskingMode parameter is + set to replace. Default is '*'. + :paramtype masking_character: str + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + :keyword pii_categories: A list of PII entity categories that should be extracted and masked. + :paramtype pii_categories: list[str] + :keyword domain: If specified, will set the PII domain to include only a subset of the entity + categories. Possible values include: 'phi', 'none'. Default is 'none'. + :paramtype domain: str + """ super(PIIDetectionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.PIIDetectionSkill' # type: str self.default_language_code = default_language_code @@ -4517,8 +5717,8 @@ def __init__( class RequestOptions(msrest.serialization.Model): """Parameter group. 
- :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :paramtype x_ms_client_request_id: str + :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :vartype x_ms_client_request_id: str """ _attribute_map = { @@ -4531,6 +5731,10 @@ def __init__( x_ms_client_request_id: Optional[str] = None, **kwargs ): + """ + :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :paramtype x_ms_client_request_id: str + """ super(RequestOptions, self).__init__(**kwargs) self.x_ms_client_request_id = x_ms_client_request_id @@ -4540,10 +5744,10 @@ class ResourceCounter(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword usage: Required. The resource usage amount. - :paramtype usage: long - :keyword quota: The resource amount quota. - :paramtype quota: long + :ivar usage: Required. The resource usage amount. + :vartype usage: long + :ivar quota: The resource amount quota. + :vartype quota: long """ _validation = { @@ -4562,6 +5766,12 @@ def __init__( quota: Optional[int] = None, **kwargs ): + """ + :keyword usage: Required. The resource usage amount. + :paramtype usage: long + :keyword quota: The resource amount quota. + :paramtype quota: long + """ super(ResourceCounter, self).__init__(**kwargs) self.usage = usage self.quota = quota @@ -4572,17 +5782,17 @@ class ScoringProfile(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the scoring profile. - :paramtype name: str - :keyword text_weights: Parameters that boost scoring based on text matches in certain index + :ivar name: Required. The name of the scoring profile. + :vartype name: str + :ivar text_weights: Parameters that boost scoring based on text matches in certain index fields. 
- :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights - :keyword functions: The collection of functions that influence the scoring of documents. - :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction] - :keyword function_aggregation: A value indicating how the results of individual scoring - functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. - Possible values include: "sum", "average", "minimum", "maximum", "firstMatching". - :paramtype function_aggregation: str or + :vartype text_weights: ~azure.search.documents.indexes.models.TextWeights + :ivar functions: The collection of functions that influence the scoring of documents. + :vartype functions: list[~azure.search.documents.indexes.models.ScoringFunction] + :ivar function_aggregation: A value indicating how the results of individual scoring functions + should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible + values include: "sum", "average", "minimum", "maximum", "firstMatching". + :vartype function_aggregation: str or ~azure.search.documents.indexes.models.ScoringFunctionAggregation """ @@ -4606,6 +5816,20 @@ def __init__( function_aggregation: Optional[Union[str, "ScoringFunctionAggregation"]] = None, **kwargs ): + """ + :keyword name: Required. The name of the scoring profile. + :paramtype name: str + :keyword text_weights: Parameters that boost scoring based on text matches in certain index + fields. + :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights + :keyword functions: The collection of functions that influence the scoring of documents. + :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction] + :keyword function_aggregation: A value indicating how the results of individual scoring + functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. 
+ Possible values include: "sum", "average", "minimum", "maximum", "firstMatching". + :paramtype function_aggregation: str or + ~azure.search.documents.indexes.models.ScoringFunctionAggregation + """ super(ScoringProfile, self).__init__(**kwargs) self.name = name self.text_weights = text_weights @@ -4644,6 +5868,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchError, self).__init__(**kwargs) self.code = None self.message = None @@ -4655,43 +5881,43 @@ class SearchField(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the field, which must be unique within the fields - collection of the index or parent field. - :paramtype name: str - :keyword type: Required. The data type of the field. Possible values include: "Edm.String", + :ivar name: Required. The name of the field, which must be unique within the fields collection + of the index or parent field. + :vartype name: str + :ivar type: Required. The data type of the field. Possible values include: "Edm.String", "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", "Edm.GeographyPoint", "Edm.ComplexType". - :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType - :keyword key: A value indicating whether the field uniquely identifies documents in the index. + :vartype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType + :ivar key: A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. - :paramtype key: bool - :keyword retrievable: A value indicating whether the field can be returned in a search result. 
- You can disable this option if you want to use a field (for example, margin) as a filter, - sorting, or scoring mechanism but do not want the field to be visible to the end user. This - property must be true for key fields, and it must be null for complex fields. This property can - be changed on existing fields. Enabling this property does not cause any increase in index - storage requirements. Default is true for simple fields and null for complex fields. - :paramtype retrievable: bool - :keyword searchable: A value indicating whether the field is full-text searchable. This means - it will undergo analysis such as word-breaking during indexing. If you set a searchable field - to a value like "sunny day", internally it will be split into the individual tokens "sunny" and + :vartype key: bool + :ivar retrievable: A value indicating whether the field can be returned in a search result. You + can disable this option if you want to use a field (for example, margin) as a filter, sorting, + or scoring mechanism but do not want the field to be visible to the end user. This property + must be true for key fields, and it must be null for complex fields. This property can be + changed on existing fields. Enabling this property does not cause any increase in index storage + requirements. Default is true for simple fields and null for complex fields. + :vartype retrievable: bool + :ivar searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and "day". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. 
Note: searchable fields consume extra space in your index since Azure Cognitive Search will store an additional tokenized version of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. - :paramtype searchable: bool - :keyword filterable: A value indicating whether to enable the field to be referenced in $filter + :vartype searchable: bool + :ivar filterable: A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. - :paramtype filterable: bool - :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby + :vartype filterable: bool + :ivar sortable: A value indicating whether to enable the field to be referenced in $orderby expressions. By default Azure Cognitive Search sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection @@ -4701,15 +5927,15 @@ class SearchField(msrest.serialization.Model): cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. 
- :paramtype sortable: bool - :keyword facetable: A value indicating whether to enable the field to be referenced in facet + :vartype sortable: bool + :ivar facetable: A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. - :paramtype facetable: bool - :keyword analyzer: The name of the analyzer to use for the field. This option can be used only + :vartype facetable: bool + :ivar analyzer: The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", @@ -4729,11 +5955,11 @@ class SearchField(msrest.serialization.Model): "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". - :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword search_analyzer: The name of the analyzer used at search time for the field. This - option can be used only with searchable fields. It must be set together with indexAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. 
This + :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", @@ -4752,12 +5978,12 @@ class SearchField(msrest.serialization.Model): "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". - :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This - option can be used only with searchable fields. It must be set together with searchAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. Once - the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + :vartype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar index_analyzer: The name of the analyzer used at indexing time for the field. This option + can be used only with searchable fields. It must be set together with searchAnalyzer and it + cannot be set together with the analyzer option. 
This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. Once the + analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", @@ -4775,21 +6001,21 @@ class SearchField(msrest.serialization.Model): "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", "whitespace". - :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword normalizer: The name of the normalizer to use for the field. This option can be used - only with fields with filterable, sortable, or facetable enabled. Once the normalizer is - chosen, it cannot be changed for the field. Must be null for complex fields. Possible values - include: "asciifolding", "elision", "lowercase", "standard", "uppercase". - :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This + :vartype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :ivar normalizer: The name of the normalizer to use for the field. This option can be used only + with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it + cannot be changed for the field. Must be null for complex fields. Possible values include: + "asciifolding", "elision", "lowercase", "standard", "uppercase". 
+ :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :ivar synonym_maps: A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. - :paramtype synonym_maps: list[str] - :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or + :vartype synonym_maps: list[str] + :ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. - :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] + :vartype fields: list[~azure.search.documents.indexes.models.SearchField] """ _validation = { @@ -4833,6 +6059,143 @@ def __init__( fields: Optional[List["SearchField"]] = None, **kwargs ): + """ + :keyword name: Required. The name of the field, which must be unique within the fields + collection of the index or parent field. + :paramtype name: str + :keyword type: Required. The data type of the field. Possible values include: "Edm.String", + "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", + "Edm.GeographyPoint", "Edm.ComplexType". + :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType + :keyword key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. 
+ :paramtype key: bool + :keyword retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :paramtype retrievable: bool + :keyword searchable: A value indicating whether the field is full-text searchable. This means + it will undergo analysis such as word-breaking during indexing. If you set a searchable field + to a value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :paramtype searchable: bool + :keyword filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. 
Default is true for simple fields and null for complex fields. + :paramtype filterable: bool + :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :paramtype sortable: bool + :keyword facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :paramtype facetable: bool + :keyword analyzer: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", + "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", + "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword search_analyzer: The name of the analyzer used at search time for the field. This + option can be used only with searchable fields. It must be set together with indexAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. This + analyzer can be updated on an existing field. 
Must be null for complex fields. Possible values + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", + "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", + "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", + "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", + "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", + "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", + "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", + "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", + "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", + "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", + "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. 
Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", + "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", + "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", + "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", + "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", + "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", + "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", + "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", + "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", + "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", + "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword normalizer: The name of the normalizer to use for the field. This option can be used + only with fields with filterable, sortable, or facetable enabled. Once the normalizer is + chosen, it cannot be changed for the field. Must be null for complex fields. 
Possible values + include: "asciifolding", "elision", "lowercase", "standard", "uppercase". + :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName + :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :paramtype synonym_maps: list[str] + :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] + """ super(SearchField, self).__init__(**kwargs) self.name = name self.type = type @@ -4855,31 +6218,31 @@ class SearchIndex(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the index. - :paramtype name: str - :keyword fields: Required. The fields of the index. - :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] - :keyword scoring_profiles: The scoring profiles for the index. - :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] - :keyword default_scoring_profile: The name of the scoring profile to use if none is specified - in the query. If this property is not set and no scoring profile is specified in the query, - then default scoring (tf-idf) will be used. - :paramtype default_scoring_profile: str - :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. 
- :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions - :keyword suggesters: The suggesters for the index. - :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester] - :keyword analyzers: The analyzers for the index. - :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] - :keyword tokenizers: The tokenizers for the index. - :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] - :keyword token_filters: The token filters for the index. - :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] - :keyword char_filters: The character filters for the index. - :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter] - :keyword normalizers: The normalizers for the index. - :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :ivar name: Required. The name of the index. + :vartype name: str + :ivar fields: Required. The fields of the index. + :vartype fields: list[~azure.search.documents.indexes.models.SearchField] + :ivar scoring_profiles: The scoring profiles for the index. + :vartype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] + :ivar default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :vartype default_scoring_profile: str + :ivar cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :vartype cors_options: ~azure.search.documents.indexes.models.CorsOptions + :ivar suggesters: The suggesters for the index. + :vartype suggesters: list[~azure.search.documents.indexes.models.Suggester] + :ivar analyzers: The analyzers for the index. 
+ :vartype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] + :ivar tokenizers: The tokenizers for the index. + :vartype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] + :ivar token_filters: The token filters for the index. + :vartype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] + :ivar char_filters: The character filters for the index. + :vartype char_filters: list[~azure.search.documents.indexes.models.CharFilter] + :ivar normalizers: The normalizers for the index. + :vartype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive @@ -4887,14 +6250,14 @@ class SearchIndex(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar similarity: The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. 
- :paramtype similarity: ~azure.search.documents.indexes.models.Similarity - :keyword e_tag: The ETag of the index. - :paramtype e_tag: str + :vartype similarity: ~azure.search.documents.indexes.models.Similarity + :ivar e_tag: The ETag of the index. + :vartype e_tag: str """ _validation = { @@ -4938,6 +6301,48 @@ def __init__( e_tag: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. The name of the index. + :paramtype name: str + :keyword fields: Required. The fields of the index. + :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] + :keyword scoring_profiles: The scoring profiles for the index. + :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] + :keyword default_scoring_profile: The name of the scoring profile to use if none is specified + in the query. If this property is not set and no scoring profile is specified in the query, + then default scoring (tf-idf) will be used. + :paramtype default_scoring_profile: str + :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions + :keyword suggesters: The suggesters for the index. + :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester] + :keyword analyzers: The analyzers for the index. + :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] + :keyword tokenizers: The tokenizers for the index. + :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] + :keyword token_filters: The token filters for the index. + :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] + :keyword char_filters: The character filters for the index. + :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter] + :keyword normalizers: The normalizers for the index. 
+ :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined at index + creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity + algorithm is used. + :paramtype similarity: ~azure.search.documents.indexes.models.Similarity + :keyword e_tag: The ETag of the index. + :paramtype e_tag: str + """ super(SearchIndex, self).__init__(**kwargs) self.name = name self.fields = fields @@ -4960,32 +6365,32 @@ class SearchIndexer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the indexer. - :paramtype name: str - :keyword description: The description of the indexer. - :paramtype description: str - :keyword data_source_name: Required. The name of the datasource from which this indexer reads + :ivar name: Required. The name of the indexer. + :vartype name: str + :ivar description: The description of the indexer. 
+ :vartype description: str + :ivar data_source_name: Required. The name of the datasource from which this indexer reads data. - :paramtype data_source_name: str - :keyword skillset_name: The name of the skillset executing with this indexer. - :paramtype skillset_name: str - :keyword target_index_name: Required. The name of the index to which this indexer writes data. - :paramtype target_index_name: str - :keyword schedule: The schedule for this indexer. - :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule - :keyword parameters: Parameters for indexer execution. - :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters - :keyword field_mappings: Defines mappings between fields in the data source and corresponding + :vartype data_source_name: str + :ivar skillset_name: The name of the skillset executing with this indexer. + :vartype skillset_name: str + :ivar target_index_name: Required. The name of the index to which this indexer writes data. + :vartype target_index_name: str + :ivar schedule: The schedule for this indexer. + :vartype schedule: ~azure.search.documents.indexes.models.IndexingSchedule + :ivar parameters: Parameters for indexer execution. + :vartype parameters: ~azure.search.documents.indexes.models.IndexingParameters + :ivar field_mappings: Defines mappings between fields in the data source and corresponding target fields in the index. - :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :keyword output_field_mappings: Output field mappings are applied after enrichment and - immediately before indexing. - :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false. - :paramtype is_disabled: bool - :keyword e_tag: The ETag of the indexer. 
- :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :vartype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :ivar output_field_mappings: Output field mappings are applied after enrichment and immediately + before indexing. + :vartype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :ivar is_disabled: A value indicating whether the indexer is disabled. Default is false. + :vartype is_disabled: bool + :ivar e_tag: The ETag of the indexer. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your @@ -4994,10 +6399,10 @@ class SearchIndexer(msrest.serialization.Model): rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification - steps without having to rebuild the index every time. - :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification steps + without having to rebuild the index every time. 
+ :vartype cache: ~azure.search.documents.indexes.models.SearchIndexerCache """ _validation = { @@ -5040,6 +6445,46 @@ def __init__( cache: Optional["SearchIndexerCache"] = None, **kwargs ): + """ + :keyword name: Required. The name of the indexer. + :paramtype name: str + :keyword description: The description of the indexer. + :paramtype description: str + :keyword data_source_name: Required. The name of the datasource from which this indexer reads + data. + :paramtype data_source_name: str + :keyword skillset_name: The name of the skillset executing with this indexer. + :paramtype skillset_name: str + :keyword target_index_name: Required. The name of the index to which this indexer writes data. + :paramtype target_index_name: str + :keyword schedule: The schedule for this indexer. + :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule + :keyword parameters: Parameters for indexer execution. + :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters + :keyword field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :keyword output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] + :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false. + :paramtype is_disabled: bool + :keyword e_tag: The ETag of the indexer. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your indexer + definition (as well as indexer execution status) when you want full assurance that no one, not + even Microsoft, can decrypt them in Azure Cognitive Search. 
Once you have encrypted your + indexer definition, it will always remain encrypted. Azure Cognitive Search will ignore + attempts to set this property to null. You can change this property as needed if you want to + rotate your encryption key; Your indexer definition (and indexer execution status) will be + unaffected. Encryption with customer-managed keys is not available for free search services, + and is only available for paid services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification + steps without having to rebuild the index every time. + :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache + """ super(SearchIndexer, self).__init__(**kwargs) self.name = name self.description = description @@ -5059,11 +6504,11 @@ def __init__( class SearchIndexerCache(msrest.serialization.Model): """SearchIndexerCache. - :keyword storage_connection_string: The connection string to the storage account where the - cache data will be persisted. - :paramtype storage_connection_string: str - :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled. - :paramtype enable_reprocessing: bool + :ivar storage_connection_string: The connection string to the storage account where the cache + data will be persisted. + :vartype storage_connection_string: str + :ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled. + :vartype enable_reprocessing: bool """ _attribute_map = { @@ -5078,6 +6523,13 @@ def __init__( enable_reprocessing: Optional[bool] = None, **kwargs ): + """ + :keyword storage_connection_string: The connection string to the storage account where the + cache data will be persisted. + :paramtype storage_connection_string: str + :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled. 
+ :paramtype enable_reprocessing: bool + """ super(SearchIndexerCache, self).__init__(**kwargs) self.storage_connection_string = storage_connection_string self.enable_reprocessing = enable_reprocessing @@ -5088,12 +6540,12 @@ class SearchIndexerDataContainer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the table or view (for Azure SQL data source) or - collection (for CosmosDB data source) that will be indexed. - :paramtype name: str - :keyword query: A query that is applied to this data container. The syntax and meaning of this + :ivar name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. + :vartype name: str + :ivar query: A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. - :paramtype query: str + :vartype query: str """ _validation = { @@ -5112,6 +6564,14 @@ def __init__( query: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. The name of the table or view (for Azure SQL data source) or + collection (for CosmosDB data source) that will be indexed. + :paramtype name: str + :keyword query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. + :paramtype query: str + """ super(SearchIndexerDataContainer, self).__init__(**kwargs) self.name = name self.query = query @@ -5125,9 +6585,9 @@ class SearchIndexerDataIdentity(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by server. 
- :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -5146,6 +6606,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerDataIdentity, self).__init__(**kwargs) self.odata_type = None # type: Optional[str] @@ -5155,9 +6617,9 @@ class SearchIndexerDataNoneIdentity(SearchIndexerDataIdentity): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by server. - :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -5172,6 +6634,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerDataNoneIdentity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataNoneIdentity' # type: str @@ -5181,31 +6645,31 @@ class SearchIndexerDataSource(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the datasource. - :paramtype name: str - :keyword description: The description of the datasource. - :paramtype description: str - :keyword type: Required. The type of the datasource. Possible values include: "azuresql", + :ivar name: Required. The name of the datasource. + :vartype name: str + :ivar description: The description of the datasource. + :vartype description: str + :ivar type: Required. The type of the datasource. Possible values include: "azuresql", "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2". - :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType - :keyword credentials: Required. Credentials for the datasource. - :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials - :keyword container: Required. The data container for the datasource. 
- :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer - :keyword identity: An explicit managed identity to use for this datasource. If not specified - and the connection string is a managed identity, the system-assigned managed identity is used. - If not specified, the value remains unchanged. If "none" is specified, the value of this - property is cleared. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :keyword data_change_detection_policy: The data change detection policy for the datasource. - :paramtype data_change_detection_policy: + :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType + :ivar credentials: Required. Credentials for the datasource. + :vartype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials + :ivar container: Required. The data container for the datasource. + :vartype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer + :ivar identity: An explicit managed identity to use for this datasource. If not specified and + the connection string is a managed identity, the system-assigned managed identity is used. If + not specified, the value remains unchanged. If "none" is specified, the value of this property + is cleared. + :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :ivar data_change_detection_policy: The data change detection policy for the datasource. + :vartype data_change_detection_policy: ~azure.search.documents.indexes.models.DataChangeDetectionPolicy - :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource. - :paramtype data_deletion_detection_policy: + :ivar data_deletion_detection_policy: The data deletion detection policy for the datasource. + :vartype data_deletion_detection_policy: ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy - :keyword e_tag: The ETag of the data source. 
- :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :ivar e_tag: The ETag of the data source. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition in Azure Cognitive Search. Once you have encrypted your data source @@ -5214,7 +6678,7 @@ class SearchIndexerDataSource(msrest.serialization.Model): encryption key; Your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey """ _validation = { @@ -5252,6 +6716,42 @@ def __init__( encryption_key: Optional["SearchResourceEncryptionKey"] = None, **kwargs ): + """ + :keyword name: Required. The name of the datasource. + :paramtype name: str + :keyword description: The description of the datasource. + :paramtype description: str + :keyword type: Required. The type of the datasource. Possible values include: "azuresql", + "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2". + :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType + :keyword credentials: Required. Credentials for the datasource. + :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials + :keyword container: Required. The data container for the datasource. + :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer + :keyword identity: An explicit managed identity to use for this datasource. 
If not specified + and the connection string is a managed identity, the system-assigned managed identity is used. + If not specified, the value remains unchanged. If "none" is specified, the value of this + property is cleared. + :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :keyword data_change_detection_policy: The data change detection policy for the datasource. + :paramtype data_change_detection_policy: + ~azure.search.documents.indexes.models.DataChangeDetectionPolicy + :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource. + :paramtype data_deletion_detection_policy: + ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy + :keyword e_tag: The ETag of the data source. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your datasource + definition when you want full assurance that no one, not even Microsoft, can decrypt your data + source definition in Azure Cognitive Search. Once you have encrypted your data source + definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set + this property to null. You can change this property as needed if you want to rotate your + encryption key; Your datasource definition will be unaffected. Encryption with customer-managed + keys is not available for free search services, and is only available for paid services created + on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + """ super(SearchIndexerDataSource, self).__init__(**kwargs) self.name = name self.description = description @@ -5270,14 +6770,14 @@ class SearchIndexerDataUserAssignedIdentity(SearchIndexerDataIdentity): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. 
Identifies the concrete type of the identity.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by server. - :paramtype odata_type: str - :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user + :vartype odata_type: str + :ivar user_assigned_identity: Required. The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. - :paramtype user_assigned_identity: str + :vartype user_assigned_identity: str """ _validation = { @@ -5296,6 +6796,13 @@ def __init__( user_assigned_identity: str, **kwargs ): + """ + :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user + assigned managed identity typically in the form + "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" + that should have been assigned to the search service. + :paramtype user_assigned_identity: str + """ super(SearchIndexerDataUserAssignedIdentity, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataUserAssignedIdentity' # type: str self.user_assigned_identity = user_assigned_identity @@ -5351,6 +6858,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerError, self).__init__(**kwargs) self.key = None self.error_message = None @@ -5365,11 +6874,11 @@ class SearchIndexerKnowledgeStore(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword storage_connection_string: Required. The connection string to the storage account + :ivar storage_connection_string: Required. The connection string to the storage account projections will be stored in. 
- :paramtype storage_connection_string: str - :keyword projections: Required. A list of additional projections to perform during indexing. - :paramtype projections: + :vartype storage_connection_string: str + :ivar projections: Required. A list of additional projections to perform during indexing. + :vartype projections: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection] """ @@ -5390,6 +6899,14 @@ def __init__( projections: List["SearchIndexerKnowledgeStoreProjection"], **kwargs ): + """ + :keyword storage_connection_string: Required. The connection string to the storage account + projections will be stored in. + :paramtype storage_connection_string: str + :keyword projections: Required. A list of additional projections to perform during indexing. + :paramtype projections: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection] + """ super(SearchIndexerKnowledgeStore, self).__init__(**kwargs) self.storage_connection_string = storage_connection_string self.projections = projections @@ -5398,16 +6915,16 @@ def __init__( class SearchIndexerKnowledgeStoreProjectionSelector(msrest.serialization.Model): """Abstract class to share properties between concrete selectors. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. 
+ :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] """ _attribute_map = { @@ -5428,6 +6945,18 @@ def __init__( inputs: Optional[List["InputFieldMappingEntry"]] = None, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + """ super(SearchIndexerKnowledgeStoreProjectionSelector, self).__init__(**kwargs) self.reference_key_name = reference_key_name self.generated_key_name = generated_key_name @@ -5441,18 +6970,18 @@ class SearchIndexerKnowledgeStoreBlobProjectionSelector(SearchIndexerKnowledgeSt All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword storage_container: Required. Blob container to store projections in. 
- :paramtype storage_container: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar storage_container: Required. Blob container to store projections in. + :vartype storage_container: str """ _validation = { @@ -5479,6 +7008,20 @@ def __init__( inputs: Optional[List["InputFieldMappingEntry"]] = None, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword storage_container: Required. Blob container to store projections in. + :paramtype storage_container: str + """ super(SearchIndexerKnowledgeStoreBlobProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, **kwargs) self.storage_container = storage_container @@ -5488,18 +7031,18 @@ class SearchIndexerKnowledgeStoreFileProjectionSelector(SearchIndexerKnowledgeSt All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. 
- :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword storage_container: Required. Blob container to store projections in. - :paramtype storage_container: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar storage_container: Required. Blob container to store projections in. + :vartype storage_container: str """ _validation = { @@ -5526,6 +7069,20 @@ def __init__( inputs: Optional[List["InputFieldMappingEntry"]] = None, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword storage_container: Required. Blob container to store projections in. 
+ :paramtype storage_container: str + """ super(SearchIndexerKnowledgeStoreFileProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, storage_container=storage_container, **kwargs) @@ -5534,18 +7091,18 @@ class SearchIndexerKnowledgeStoreObjectProjectionSelector(SearchIndexerKnowledge All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword storage_container: Required. Blob container to store projections in. - :paramtype storage_container: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar storage_container: Required. Blob container to store projections in. + :vartype storage_container: str """ _validation = { @@ -5572,20 +7129,34 @@ def __init__( inputs: Optional[List["InputFieldMappingEntry"]] = None, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. 
+ :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword storage_container: Required. Blob container to store projections in. + :paramtype storage_container: str + """ super(SearchIndexerKnowledgeStoreObjectProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, storage_container=storage_container, **kwargs) class SearchIndexerKnowledgeStoreProjection(msrest.serialization.Model): """Container object for various projection selectors. - :keyword tables: Projections to Azure Table storage. - :paramtype tables: + :ivar tables: Projections to Azure Table storage. + :vartype tables: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector] - :keyword objects: Projections to Azure Blob storage. - :paramtype objects: + :ivar objects: Projections to Azure Blob storage. + :vartype objects: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector] - :keyword files: Projections to Azure File storage. - :paramtype files: + :ivar files: Projections to Azure File storage. + :vartype files: list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector] """ @@ -5603,6 +7174,17 @@ def __init__( files: Optional[List["SearchIndexerKnowledgeStoreFileProjectionSelector"]] = None, **kwargs ): + """ + :keyword tables: Projections to Azure Table storage. 
+ :paramtype tables: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector] + :keyword objects: Projections to Azure Blob storage. + :paramtype objects: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector] + :keyword files: Projections to Azure File storage. + :paramtype files: + list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector] + """ super(SearchIndexerKnowledgeStoreProjection, self).__init__(**kwargs) self.tables = tables self.objects = objects @@ -5614,18 +7196,18 @@ class SearchIndexerKnowledgeStoreTableProjectionSelector(SearchIndexerKnowledgeS All required parameters must be populated in order to send to Azure. - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword table_name: Required. Name of the Azure table to store projected data in. - :paramtype table_name: str + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar table_name: Required. 
Name of the Azure table to store projected data in. + :vartype table_name: str """ _validation = { @@ -5652,6 +7234,20 @@ def __init__( inputs: Optional[List["InputFieldMappingEntry"]] = None, **kwargs ): + """ + :keyword reference_key_name: Name of reference key to different projection. + :paramtype reference_key_name: str + :keyword generated_key_name: Name of generated key to store projection under. + :paramtype generated_key_name: str + :keyword source: Source data to project. + :paramtype source: str + :keyword source_context: Source context for complex projections. + :paramtype source_context: str + :keyword inputs: Nested inputs for complex projections. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword table_name: Required. Name of the Azure table to store projected data in. + :paramtype table_name: str + """ super(SearchIndexerKnowledgeStoreTableProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, **kwargs) self.table_name = table_name @@ -5688,6 +7284,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerLimits, self).__init__(**kwargs) self.max_run_time = None self.max_document_extraction_size = None @@ -5699,22 +7297,22 @@ class SearchIndexerSkillset(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the skillset. - :paramtype name: str - :keyword description: The description of the skillset. - :paramtype description: str - :keyword skills: Required. A list of skills in the skillset. - :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] - :keyword cognitive_services_account: Details about cognitive services to be used when running + :ivar name: Required. The name of the skillset. + :vartype name: str + :ivar description: The description of the skillset. 
+ :vartype description: str + :ivar skills: Required. A list of skills in the skillset. + :vartype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] + :ivar cognitive_services_account: Details about cognitive services to be used when running skills. - :paramtype cognitive_services_account: + :vartype cognitive_services_account: ~azure.search.documents.indexes.models.CognitiveServicesAccount - :keyword knowledge_store: Definition of additional projections to azure blob, table, or files, - of enriched data. - :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore - :keyword e_tag: The ETag of the skillset. - :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :ivar knowledge_store: Definition of additional projections to azure blob, table, or files, of + enriched data. + :vartype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore + :ivar e_tag: The ETag of the skillset. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure Cognitive Search. Once you have encrypted your skillset @@ -5723,7 +7321,7 @@ class SearchIndexerSkillset(msrest.serialization.Model): encryption key; Your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. 
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey """ _validation = { @@ -5753,6 +7351,33 @@ def __init__( encryption_key: Optional["SearchResourceEncryptionKey"] = None, **kwargs ): + """ + :keyword name: Required. The name of the skillset. + :paramtype name: str + :keyword description: The description of the skillset. + :paramtype description: str + :keyword skills: Required. A list of skills in the skillset. + :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] + :keyword cognitive_services_account: Details about cognitive services to be used when running + skills. + :paramtype cognitive_services_account: + ~azure.search.documents.indexes.models.CognitiveServicesAccount + :keyword knowledge_store: Definition of additional projections to azure blob, table, or files, + of enriched data. + :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore + :keyword e_tag: The ETag of the skillset. + :paramtype e_tag: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your skillset + definition when you want full assurance that no one, not even Microsoft, can decrypt your + skillset definition in Azure Cognitive Search. Once you have encrypted your skillset + definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set + this property to null. You can change this property as needed if you want to rotate your + encryption key; Your skillset definition will be unaffected. Encryption with customer-managed + keys is not available for free search services, and is only available for paid services created + on or after January 1, 2019. 
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + """ super(SearchIndexerSkillset, self).__init__(**kwargs) self.name = name self.description = description @@ -5800,6 +7425,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerStatus, self).__init__(**kwargs) self.status = None self.last_result = None @@ -5850,6 +7477,8 @@ def __init__( self, **kwargs ): + """ + """ super(SearchIndexerWarning, self).__init__(**kwargs) self.key = None self.message = None @@ -5863,25 +7492,25 @@ class SearchResourceEncryptionKey(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your + :ivar key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data + at rest. + :vartype key_name: str + :ivar key_version: Required. The version of your Azure Key Vault key to be used to encrypt your data at rest. - :paramtype key_name: str - :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt - your data at rest. - :paramtype key_version: str - :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, - that contains the key to be used to encrypt your data at rest. An example URI might be + :vartype key_version: str + :ivar vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that + contains the key to be used to encrypt your data at rest. An example URI might be https://my-keyvault-name.vault.azure.net. - :paramtype vault_uri: str - :keyword access_credentials: Optional Azure Active Directory credentials used for accessing - your Azure Key Vault. Not required if using managed identity instead. - :paramtype access_credentials: + :vartype vault_uri: str + :ivar access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key Vault. 
Not required if using managed identity instead. + :vartype access_credentials: ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials - :keyword identity: An explicit managed identity to use for this encryption key. If not - specified and the access credentials property is null, the system-assigned managed identity is - used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. - If "none" is specified, the value of this property is cleared. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + :ivar identity: An explicit managed identity to use for this encryption key. If not specified + and the access credentials property is null, the system-assigned managed identity is used. On + update to the resource, if the explicit identity is unspecified, it remains unchanged. If + "none" is specified, the value of this property is cleared. + :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity """ _validation = { @@ -5908,6 +7537,27 @@ def __init__( identity: Optional["SearchIndexerDataIdentity"] = None, **kwargs ): + """ + :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your + data at rest. + :paramtype key_name: str + :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt + your data at rest. + :paramtype key_version: str + :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, + that contains the key to be used to encrypt your data at rest. An example URI might be + https://my-keyvault-name.vault.azure.net. + :paramtype vault_uri: str + :keyword access_credentials: Optional Azure Active Directory credentials used for accessing + your Azure Key Vault. Not required if using managed identity instead. 
+ :paramtype access_credentials: + ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials + :keyword identity: An explicit managed identity to use for this encryption key. If not + specified and the access credentials property is null, the system-assigned managed identity is + used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. + If "none" is specified, the value of this property is cleared. + :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity + """ super(SearchResourceEncryptionKey, self).__init__(**kwargs) self.key_name = key_name self.key_version = key_version @@ -5921,30 +7571,29 @@ class SentimentSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. 
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", "ru", "es", "sv", "tr". - :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.SentimentSkillLanguage """ @@ -5975,6 +7624,30 @@ def __init__( default_language_code: Optional[Union[str, "SentimentSkillLanguage"]] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. 
+ :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", + "ru", "es", "sv", "tr". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.SentimentSkillLanguage + """ super(SentimentSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' # type: str self.default_language_code = default_language_code @@ -5985,36 +7658,35 @@ class SentimentSkillV3(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. 
A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. - :paramtype default_language_code: str - :keyword include_opinion_mining: If set to true, the skill output will include information from + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. 
The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. + :vartype default_language_code: str + :ivar include_opinion_mining: If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. - :paramtype include_opinion_mining: bool - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str + :vartype include_opinion_mining: bool + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It will default to the latest available when not specified. We recommend you do not specify + this value unless absolutely necessary. + :vartype model_version: str """ _validation = { @@ -6048,6 +7720,35 @@ def __init__( model_version: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. 
Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + :paramtype default_language_code: str + :keyword include_opinion_mining: If set to true, the skill output will include information from + Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated + assessment (adjective) in the text. Default is false. + :paramtype include_opinion_mining: bool + :keyword model_version: The version of the model to use when calling the Text Analytics + service. It will default to the latest available when not specified. We recommend you do not + specify this value unless absolutely necessary. + :paramtype model_version: str + """ super(SentimentSkillV3, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.V3.SentimentSkill' # type: str self.default_language_code = default_language_code @@ -6060,21 +7761,20 @@ class ServiceCounters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword document_counter: Required. Total number of documents across all indexes in the - service. - :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword index_counter: Required. Total number of indexes. - :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword indexer_counter: Required. Total number of indexers. 
- :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword data_source_counter: Required. Total number of data sources. - :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword storage_size_counter: Required. Total size of used storage in bytes. - :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword synonym_map_counter: Required. Total number of synonym maps. - :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword skillset_counter: Total number of skillsets. - :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar document_counter: Required. Total number of documents across all indexes in the service. + :vartype document_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar index_counter: Required. Total number of indexes. + :vartype index_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar indexer_counter: Required. Total number of indexers. + :vartype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar data_source_counter: Required. Total number of data sources. + :vartype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar storage_size_counter: Required. Total size of used storage in bytes. + :vartype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar synonym_map_counter: Required. Total number of synonym maps. + :vartype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter + :ivar skillset_counter: Total number of skillsets. + :vartype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter """ _validation = { @@ -6108,6 +7808,23 @@ def __init__( skillset_counter: Optional["ResourceCounter"] = None, **kwargs ): + """ + :keyword document_counter: Required. 
Total number of documents across all indexes in the + service. + :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword index_counter: Required. Total number of indexes. + :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword indexer_counter: Required. Total number of indexers. + :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword data_source_counter: Required. Total number of data sources. + :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword storage_size_counter: Required. Total size of used storage in bytes. + :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword synonym_map_counter: Required. Total number of synonym maps. + :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter + :keyword skillset_counter: Total number of skillsets. + :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter + """ super(ServiceCounters, self).__init__(**kwargs) self.document_counter = document_counter self.index_counter = index_counter @@ -6121,17 +7838,17 @@ def __init__( class ServiceLimits(msrest.serialization.Model): """Represents various service level limits. - :keyword max_fields_per_index: The maximum allowed fields per index. - :paramtype max_fields_per_index: int - :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in - an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. - :paramtype max_field_nesting_depth_per_index: int - :keyword max_complex_collection_fields_per_index: The maximum number of fields of type + :ivar max_fields_per_index: The maximum allowed fields per index. 
+ :vartype max_fields_per_index: int + :ivar max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an + index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. + :vartype max_field_nesting_depth_per_index: int + :ivar max_complex_collection_fields_per_index: The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. - :paramtype max_complex_collection_fields_per_index: int - :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in - complex collections allowed per document. - :paramtype max_complex_objects_in_collections_per_document: int + :vartype max_complex_collection_fields_per_index: int + :ivar max_complex_objects_in_collections_per_document: The maximum number of objects in complex + collections allowed per document. + :vartype max_complex_objects_in_collections_per_document: int """ _attribute_map = { @@ -6150,6 +7867,19 @@ def __init__( max_complex_objects_in_collections_per_document: Optional[int] = None, **kwargs ): + """ + :keyword max_fields_per_index: The maximum allowed fields per index. + :paramtype max_fields_per_index: int + :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in + an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. + :paramtype max_field_nesting_depth_per_index: int + :keyword max_complex_collection_fields_per_index: The maximum number of fields of type + Collection(Edm.ComplexType) allowed in an index. + :paramtype max_complex_collection_fields_per_index: int + :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in + complex collections allowed per document. 
+ :paramtype max_complex_objects_in_collections_per_document: int + """ super(ServiceLimits, self).__init__(**kwargs) self.max_fields_per_index = max_fields_per_index self.max_field_nesting_depth_per_index = max_field_nesting_depth_per_index @@ -6162,10 +7892,10 @@ class ServiceStatistics(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword counters: Required. Service level resource counters. - :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters - :keyword limits: Required. Service level general limits. - :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits + :ivar counters: Required. Service level resource counters. + :vartype counters: ~azure.search.documents.indexes.models.ServiceCounters + :ivar limits: Required. Service level general limits. + :vartype limits: ~azure.search.documents.indexes.models.ServiceLimits """ _validation = { @@ -6185,6 +7915,12 @@ def __init__( limits: "ServiceLimits", **kwargs ): + """ + :keyword counters: Required. Service level resource counters. + :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters + :keyword limits: Required. Service level general limits. + :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits + """ super(ServiceStatistics, self).__init__(**kwargs) self.counters = counters self.limits = limits @@ -6195,26 +7931,25 @@ class ShaperSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. 
A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] """ _validation = { @@ -6242,6 +7977,25 @@ def __init__( context: Optional[str] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + """ super(ShaperSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' # type: str @@ -6251,31 +8005,31 @@ class ShingleTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. 
+ :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2. - :paramtype max_shingle_size: int - :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be - less than the value of maxShingleSize. - :paramtype min_shingle_size: int - :keyword output_unigrams: A value indicating whether the output stream will contain the input + :vartype name: str + :ivar max_shingle_size: The maximum shingle size. Default and minimum value is 2. + :vartype max_shingle_size: int + :ivar min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less + than the value of maxShingleSize. + :vartype min_shingle_size: int + :ivar output_unigrams: A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. - :paramtype output_unigrams: bool - :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for - those times when no shingles are available. This property takes precedence when outputUnigrams - is set to false. Default is false. - :paramtype output_unigrams_if_no_shingles: bool - :keyword token_separator: The string to use when joining adjacent tokens to form a shingle. + :vartype output_unigrams: bool + :ivar output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those + times when no shingles are available. This property takes precedence when outputUnigrams is set + to false. Default is false. + :vartype output_unigrams_if_no_shingles: bool + :ivar token_separator: The string to use when joining adjacent tokens to form a shingle. Default is a single space (" "). 
- :paramtype token_separator: str - :keyword filter_token: The string to insert for each position at which there is no token. - Default is an underscore ("_"). - :paramtype filter_token: str + :vartype token_separator: str + :ivar filter_token: The string to insert for each position at which there is no token. Default + is an underscore ("_"). + :vartype filter_token: str """ _validation = { @@ -6308,6 +8062,30 @@ def __init__( filter_token: Optional[str] = "_", **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2. + :paramtype max_shingle_size: int + :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be + less than the value of maxShingleSize. + :paramtype min_shingle_size: int + :keyword output_unigrams: A value indicating whether the output stream will contain the input + tokens (unigrams) as well as shingles. Default is true. + :paramtype output_unigrams: bool + :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for + those times when no shingles are available. This property takes precedence when outputUnigrams + is set to false. Default is false. + :paramtype output_unigrams_if_no_shingles: bool + :keyword token_separator: The string to use when joining adjacent tokens to form a shingle. + Default is a single space (" "). + :paramtype token_separator: str + :keyword filter_token: The string to insert for each position at which there is no token. + Default is an underscore ("_"). 
+ :paramtype filter_token: str + """ super(ShingleTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' # type: str self.max_shingle_size = max_shingle_size @@ -6318,23 +8096,48 @@ def __init__( self.filter_token = filter_token +class SkillNames(msrest.serialization.Model): + """SkillNames. + + :ivar skill_names: the names of skills to be reset. + :vartype skill_names: list[str] + """ + + _attribute_map = { + 'skill_names': {'key': 'skillNames', 'type': '[str]'}, + } + + def __init__( + self, + *, + skill_names: Optional[List[str]] = None, + **kwargs + ): + """ + :keyword skill_names: the names of skills to be reset. + :paramtype skill_names: list[str] + """ + super(SkillNames, self).__init__(**kwargs) + self.skill_names = skill_names + + class SnowballTokenFilter(TokenFilter): """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword language: Required. The language to use. Possible values include: "armenian", - "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", - "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", - "russian", "spanish", "swedish", "turkish". 
- :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage + :vartype name: str + :ivar language: Required. The language to use. Possible values include: "armenian", "basque", + "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", + "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", + "spanish", "swedish", "turkish". + :vartype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage """ _validation = { @@ -6356,6 +8159,17 @@ def __init__( language: Union[str, "SnowballTokenFilterLanguage"], **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword language: Required. The language to use. Possible values include: "armenian", + "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", + "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", + "russian", "spanish", "swedish", "turkish". + :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage + """ super(SnowballTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' # type: str self.language = language @@ -6366,13 +8180,13 @@ class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data deletion detection + :ivar odata_type: Required. Identifies the concrete type of the data deletion detection policy.Constant filled by server. - :paramtype odata_type: str - :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection. 
- :paramtype soft_delete_column_name: str - :keyword soft_delete_marker_value: The marker value that identifies an item as deleted. - :paramtype soft_delete_marker_value: str + :vartype odata_type: str + :ivar soft_delete_column_name: The name of the column to use for soft-deletion detection. + :vartype soft_delete_column_name: str + :ivar soft_delete_marker_value: The marker value that identifies an item as deleted. + :vartype soft_delete_marker_value: str """ _validation = { @@ -6392,6 +8206,12 @@ def __init__( soft_delete_marker_value: Optional[str] = None, **kwargs ): + """ + :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection. + :paramtype soft_delete_column_name: str + :keyword soft_delete_marker_value: The marker value that identifies an item as deleted. + :paramtype soft_delete_marker_value: str + """ super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' # type: str self.soft_delete_column_name = soft_delete_column_name @@ -6403,35 +8223,34 @@ class SplitSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. 
- :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is en. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". 
- :paramtype default_language_code: str or + :vartype default_language_code: str or ~azure.search.documents.indexes.models.SplitSkillLanguage - :keyword text_split_mode: A value indicating which split mode to perform. Possible values - include: "pages", "sentences". - :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode - :keyword maximum_page_length: The desired maximum page length. Default is 10000. - :paramtype maximum_page_length: int + :ivar text_split_mode: A value indicating which split mode to perform. Possible values include: + "pages", "sentences". + :vartype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode + :ivar maximum_page_length: The desired maximum page length. Default is 10000. + :vartype maximum_page_length: int """ _validation = { @@ -6465,6 +8284,34 @@ def __init__( maximum_page_length: Optional[int] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. 
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_language_code: A value indicating which language code to use. Default is en. + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :paramtype default_language_code: str or + ~azure.search.documents.indexes.models.SplitSkillLanguage + :keyword text_split_mode: A value indicating which split mode to perform. Possible values + include: "pages", "sentences". + :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode + :keyword maximum_page_length: The desired maximum page length. Default is 10000. + :paramtype maximum_page_length: int + """ super(SplitSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.SplitSkill' # type: str self.default_language_code = default_language_code @@ -6477,9 +8324,9 @@ class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the data change detection + :ivar odata_type: Required. Identifies the concrete type of the data change detection policy.Constant filled by server. - :paramtype odata_type: str + :vartype odata_type: str """ _validation = { @@ -6494,6 +8341,8 @@ def __init__( self, **kwargs ): + """ + """ super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' # type: str @@ -6503,16 +8352,16 @@ class StemmerOverrideTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. 
It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for + :vartype name: str + :ivar rules: Required. A list of stemming rules in the following format: "word => stem", for example: "ran => run". - :paramtype rules: list[str] + :vartype rules: list[str] """ _validation = { @@ -6534,6 +8383,15 @@ def __init__( rules: List[str], **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for + example: "ran => run". + :paramtype rules: list[str] + """ super(StemmerOverrideTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' # type: str self.rules = rules @@ -6544,23 +8402,23 @@ class StemmerTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. 
It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword language: Required. The language to use. Possible values include: "arabic", - "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", - "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", - "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + :vartype name: str + :ivar language: Required. The language to use. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", + "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", + "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". - :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage + :vartype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage """ _validation = { @@ -6582,6 +8440,22 @@ def __init__( language: Union[str, "StemmerTokenFilterLanguage"], **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword language: Required. The language to use. 
Possible values include: "arabic", + "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", + "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", + "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", + "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", + "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", + "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", + "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". + :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage + """ super(StemmerTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' # type: str self.language = language @@ -6592,15 +8466,15 @@ class StopAnalyzer(LexicalAnalyzer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the analyzer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] + :vartype odata_type: str + :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. 
+ :vartype name: str + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] """ _validation = { @@ -6621,6 +8495,14 @@ def __init__( stopwords: Optional[List[str]] = None, **kwargs ): + """ + :keyword name: Required. The name of the analyzer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword stopwords: A list of stopwords. + :paramtype stopwords: list[str] + """ super(StopAnalyzer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' # type: str self.stopwords = stopwords @@ -6631,29 +8513,29 @@ class StopwordsTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot + :vartype name: str + :ivar stopwords: The list of stopwords. This property and the stopwords list property cannot both be set. - :paramtype stopwords: list[str] - :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords + :vartype stopwords: list[str] + :ivar stopwords_list: A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. 
Possible values include: "arabic", "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", "sorani", "spanish", "swedish", "thai", "turkish". - :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList - :keyword ignore_case: A value indicating whether to ignore case. If true, all words are - converted to lower case first. Default is false. - :paramtype ignore_case: bool - :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term - if it's a stop word. Default is true. - :paramtype remove_trailing_stop_words: bool + :vartype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList + :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted + to lower case first. Default is false. + :vartype ignore_case: bool + :ivar remove_trailing_stop_words: A value indicating whether to ignore the last search term if + it's a stop word. Default is true. + :vartype remove_trailing_stop_words: bool """ _validation = { @@ -6680,6 +8562,28 @@ def __init__( remove_trailing_stop_words: Optional[bool] = True, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot + both be set. + :paramtype stopwords: list[str] + :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords + property cannot both be set. Default is English. 
Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", + "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", + "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", + "sorani", "spanish", "swedish", "thai", "turkish". + :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList + :keyword ignore_case: A value indicating whether to ignore case. If true, all words are + converted to lower case first. Default is false. + :paramtype ignore_case: bool + :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term + if it's a stop word. Default is true. + :paramtype remove_trailing_stop_words: bool + """ super(StopwordsTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' # type: str self.stopwords = stopwords @@ -6695,14 +8599,14 @@ class Suggester(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the suggester. - :paramtype name: str + :ivar name: Required. The name of the suggester. + :vartype name: str :ivar search_mode: A value indicating the capabilities of the suggester. Has constant value: "analyzingInfixMatching". :vartype search_mode: str - :keyword source_fields: Required. The list of field names to which the suggester applies. Each + :ivar source_fields: Required. The list of field names to which the suggester applies. Each field must be searchable. - :paramtype source_fields: list[str] + :vartype source_fields: list[str] """ _validation = { @@ -6726,6 +8630,13 @@ def __init__( source_fields: List[str], **kwargs ): + """ + :keyword name: Required. The name of the suggester. + :paramtype name: str + :keyword source_fields: Required. The list of field names to which the suggester applies. 
Each + field must be searchable. + :paramtype source_fields: list[str] + """ super(Suggester, self).__init__(**kwargs) self.name = name self.source_fields = source_fields @@ -6738,15 +8649,15 @@ class SynonymMap(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword name: Required. The name of the synonym map. - :paramtype name: str + :ivar name: Required. The name of the synonym map. + :vartype name: str :ivar format: The format of the synonym map. Only the 'solr' format is currently supported. Has constant value: "solr". :vartype format: str - :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The + :ivar synonyms: Required. A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. - :paramtype synonyms: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + :vartype synonyms: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive @@ -6754,9 +8665,9 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword e_tag: The ETag of the synonym map. - :paramtype e_tag: str + :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :ivar e_tag: The ETag of the synonym map. 
+ :vartype e_tag: str """ _validation = { @@ -6784,6 +8695,24 @@ def __init__( e_tag: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. The name of the synonym map. + :paramtype name: str + :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The + rules must be separated by newlines. + :paramtype synonyms: str + :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey + :keyword e_tag: The ETag of the synonym map. + :paramtype e_tag: str + """ super(SynonymMap, self).__init__(**kwargs) self.name = name self.synonyms = synonyms @@ -6796,30 +8725,30 @@ class SynonymTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. 
It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible, + :vartype name: str + :ivar synonyms: Required. A list of synonyms in following one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. - :paramtype synonyms: list[str] - :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is + :vartype synonyms: list[str] + :ivar ignore_case: A value indicating whether to case-fold input for matching. Default is false. - :paramtype ignore_case: bool - :keyword expand: A value indicating whether all words in the list of synonyms (if => notation - is not used) will map to one another. If true, all words in the list of synonyms (if => - notation is not used) will map to one another. The following list: incredible, unbelievable, - fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, + :vartype ignore_case: bool + :ivar expand: A value indicating whether all words in the list of synonyms (if => notation is + not used) will map to one another. If true, all words in the list of synonyms (if => notation + is not used) will map to one another. The following list: incredible, unbelievable, fabulous, + amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. 
- :paramtype expand: bool + :vartype expand: bool """ _validation = { @@ -6845,6 +8774,29 @@ def __init__( expand: Optional[bool] = True, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible, + unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced + with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma + separated list of equivalent words. Set the expand option to change how this list is + interpreted. + :paramtype synonyms: list[str] + :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is + false. + :paramtype ignore_case: bool + :keyword expand: A value indicating whether all words in the list of synonyms (if => notation + is not used) will map to one another. If true, all words in the list of synonyms (if => + notation is not used) will map to one another. The following list: incredible, unbelievable, + fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, + unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, + fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => + incredible. Default is true. + :paramtype expand: bool + """ super(SynonymTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' # type: str self.synonyms = synonyms @@ -6857,21 +8809,21 @@ class TagScoringFunction(ScoringFunction): All required parameters must be populated in order to send to Azure. - :keyword type: Required. Indicates the type of function to use. Valid values include magnitude, + :ivar type: Required. 
Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case.Constant filled by server. - :paramtype type: str - :keyword field_name: Required. The name of the field used as input to the scoring function. - :paramtype field_name: str - :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal - to 1.0. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document + :vartype type: str + :ivar field_name: Required. The name of the field used as input to the scoring function. + :vartype field_name: str + :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", "logarithmic". - :paramtype interpolation: str or + :vartype interpolation: str or ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Required. Parameter values for the tag scoring function. - :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters + :ivar parameters: Required. Parameter values for the tag scoring function. + :vartype parameters: ~azure.search.documents.indexes.models.TagScoringParameters """ _validation = { @@ -6898,6 +8850,20 @@ def __init__( interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, **kwargs ): + """ + :keyword field_name: Required. The name of the field used as input to the scoring function. + :paramtype field_name: str + :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal + to 1.0. + :paramtype boost: float + :keyword interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". 
Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :paramtype interpolation: str or + ~azure.search.documents.indexes.models.ScoringFunctionInterpolation + :keyword parameters: Required. Parameter values for the tag scoring function. + :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters + """ super(TagScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) self.type = 'tag' # type: str self.parameters = parameters @@ -6908,9 +8874,9 @@ class TagScoringParameters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword tags_parameter: Required. The name of the parameter passed in search queries to - specify the list of tags to compare against the target field. - :paramtype tags_parameter: str + :ivar tags_parameter: Required. The name of the parameter passed in search queries to specify + the list of tags to compare against the target field. + :vartype tags_parameter: str """ _validation = { @@ -6927,6 +8893,11 @@ def __init__( tags_parameter: str, **kwargs ): + """ + :keyword tags_parameter: Required. The name of the parameter passed in search queries to + specify the list of tags to compare against the target field. + :paramtype tags_parameter: str + """ super(TagScoringParameters, self).__init__(**kwargs) self.tags_parameter = tags_parameter @@ -6936,45 +8907,44 @@ class TextTranslationSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. 
A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_to_language_code: Required. The language code to translate documents into for + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar default_to_language_code: Required. 
The language code to translate documents into for documents that don't specify the to language explicitly. Possible values include: "af", "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". - :paramtype default_to_language_code: str or + :vartype default_to_language_code: str or ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :keyword default_from_language_code: The language code to translate documents from for - documents that don't specify the from language explicitly. Possible values include: "af", "ar", - "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", - "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", - "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", - "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", - "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". - :paramtype default_from_language_code: str or + :ivar default_from_language_code: The language code to translate documents from for documents + that don't specify the from language explicitly. 
Possible values include: "af", "ar", "bn", + "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", + "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", + "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", + "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", + "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". + :vartype default_from_language_code: str or ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :keyword suggested_from: The language code to translate documents from when neither the + :ivar suggested_from: The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is en. Possible values include: "af", "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", @@ -6983,7 +8953,7 @@ class TextTranslationSkill(SearchIndexerSkill): "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". - :paramtype suggested_from: str or + :vartype suggested_from: str or ~azure.search.documents.indexes.models.TextTranslationSkillLanguage """ @@ -7019,6 +8989,54 @@ def __init__( suggested_from: Optional[Union[str, "TextTranslationSkillLanguage"]] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. 
+ :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword default_to_language_code: Required. The language code to translate documents into for + documents that don't specify the to language explicitly. Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", + "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", + "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". + :paramtype default_to_language_code: str or + ~azure.search.documents.indexes.models.TextTranslationSkillLanguage + :keyword default_from_language_code: The language code to translate documents from for + documents that don't specify the from language explicitly. 
Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", + "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", + "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa". + :paramtype default_from_language_code: str or + ~azure.search.documents.indexes.models.TextTranslationSkillLanguage + :keyword suggested_from: The language code to translate documents from when neither the + fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the + automatic language detection is unsuccessful. Default is en. Possible values include: "af", + "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", + "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", + "sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", + "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", + "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", + "pa". + :paramtype suggested_from: str or + ~azure.search.documents.indexes.models.TextTranslationSkillLanguage + """ super(TextTranslationSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' # type: str self.default_to_language_code = default_to_language_code @@ -7031,9 +9049,9 @@ class TextWeights(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :keyword weights: Required. The dictionary of per-field weights to boost document scoring. 
The + :ivar weights: Required. The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. - :paramtype weights: dict[str, float] + :vartype weights: dict[str, float] """ _validation = { @@ -7050,6 +9068,11 @@ def __init__( weights: Dict[str, float], **kwargs ): + """ + :keyword weights: Required. The dictionary of per-field weights to boost document scoring. The + keys are field names and the values are the weights for each field. + :paramtype weights: dict[str, float] + """ super(TextWeights, self).__init__(**kwargs) self.weights = weights @@ -7059,15 +9082,15 @@ class TruncateTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword length: The length at which terms will be truncated. Default and maximum is 300. - :paramtype length: int + :vartype name: str + :ivar length: The length at which terms will be truncated. Default and maximum is 300. + :vartype length: int """ _validation = { @@ -7089,6 +9112,14 @@ def __init__( length: Optional[int] = 300, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :paramtype name: str + :keyword length: The length at which terms will be truncated. Default and maximum is 300. + :paramtype length: int + """ super(TruncateTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter' # type: str self.length = length @@ -7099,16 +9130,16 @@ class UaxUrlEmailTokenizer(LexicalTokenizer): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by server. - :paramtype odata_type: str - :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + :vartype odata_type: str + :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int + :vartype max_token_length: int """ _validation = { @@ -7130,6 +9161,15 @@ def __init__( max_token_length: Optional[int] = 255, **kwargs ): + """ + :keyword name: Required. The name of the tokenizer. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. 
The maximum token length that can be used is 300 characters. + :paramtype max_token_length: int + """ super(UaxUrlEmailTokenizer, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' # type: str self.max_token_length = max_token_length @@ -7140,16 +9180,16 @@ class UniqueTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword only_on_same_position: A value indicating whether to remove duplicates only at the - same position. Default is false. - :paramtype only_on_same_position: bool + :vartype name: str + :ivar only_on_same_position: A value indicating whether to remove duplicates only at the same + position. Default is false. + :vartype only_on_same_position: bool """ _validation = { @@ -7170,6 +9210,15 @@ def __init__( only_on_same_position: Optional[bool] = False, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword only_on_same_position: A value indicating whether to remove duplicates only at the + same position. Default is false. 
+ :paramtype only_on_same_position: bool + """ super(UniqueTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter' # type: str self.only_on_same_position = only_on_same_position @@ -7180,39 +9229,38 @@ class WebApiSkill(SearchIndexerSkill): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by + :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by server. - :paramtype odata_type: str - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or - the output of an upstream skill. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: Required. The output of a skill is either a field in a search index, or a - value that can be consumed as an input by another skill. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword uri: Required. The url for the Web API. - :paramtype uri: str - :keyword http_headers: The headers required to make the http request. 
- :paramtype http_headers: dict[str, str] - :keyword http_method: The method for the http request. - :paramtype http_method: str - :keyword timeout: The desired timeout for the request. Default is 30 seconds. - :paramtype timeout: ~datetime.timedelta - :keyword batch_size: The desired batch size which indicates number of documents. - :paramtype batch_size: int - :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the - Web API. - :paramtype degree_of_parallelism: int + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar uri: Required. The url for the Web API. + :vartype uri: str + :ivar http_headers: The headers required to make the http request. + :vartype http_headers: dict[str, str] + :ivar http_method: The method for the http request. + :vartype http_method: str + :ivar timeout: The desired timeout for the request. Default is 30 seconds. + :vartype timeout: ~datetime.timedelta + :ivar batch_size: The desired batch size which indicates number of documents. + :vartype batch_size: int + :ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web + API. 
+ :vartype degree_of_parallelism: int """ _validation = { @@ -7253,6 +9301,38 @@ def __init__( degree_of_parallelism: Optional[int] = None, **kwargs ): + """ + :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :paramtype name: str + :keyword description: The description of the skill which describes the inputs, outputs, and + usage of the skill. + :paramtype description: str + :keyword context: Represents the level at which operations take place, such as the document + root or document content (for example, /document or /document/content). The default is + /document. + :paramtype context: str + :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or + the output of an upstream skill. + :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :keyword outputs: Required. The output of a skill is either a field in a search index, or a + value that can be consumed as an input by another skill. + :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :keyword uri: Required. The url for the Web API. + :paramtype uri: str + :keyword http_headers: The headers required to make the http request. + :paramtype http_headers: dict[str, str] + :keyword http_method: The method for the http request. + :paramtype http_method: str + :keyword timeout: The desired timeout for the request. Default is 30 seconds. + :paramtype timeout: ~datetime.timedelta + :keyword batch_size: The desired batch size which indicates number of documents. + :paramtype batch_size: int + :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the + Web API. 
+ :paramtype degree_of_parallelism: int + """ super(WebApiSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' # type: str self.uri = uri @@ -7268,44 +9348,43 @@ class WordDelimiterTokenFilter(TokenFilter): All required parameters must be populated in order to send to Azure. - :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :paramtype odata_type: str - :keyword name: Required. The name of the token filter. It must only contain letters, digits, + :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by + server. + :vartype odata_type: str + :ivar name: Required. The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - :paramtype name: str - :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes + :vartype name: str + :ivar generate_word_parts: A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. - :paramtype generate_word_parts: bool - :keyword generate_number_parts: A value indicating whether to generate number subwords. Default - is true. - :paramtype generate_number_parts: bool - :keyword catenate_words: A value indicating whether maximum runs of word parts will be - catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default - is false. - :paramtype catenate_words: bool - :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be + :vartype generate_word_parts: bool + :ivar generate_number_parts: A value indicating whether to generate number subwords. Default is + true. 
+ :vartype generate_number_parts: bool + :ivar catenate_words: A value indicating whether maximum runs of word parts will be catenated. + For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. + :vartype catenate_words: bool + :ivar catenate_numbers: A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. - :paramtype catenate_numbers: bool - :keyword catenate_all: A value indicating whether all subword parts will be catenated. For + :vartype catenate_numbers: bool + :ivar catenate_all: A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. - :paramtype catenate_all: bool - :keyword split_on_case_change: A value indicating whether to split words on caseChange. For + :vartype catenate_all: bool + :ivar split_on_case_change: A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. - :paramtype split_on_case_change: bool - :keyword preserve_original: A value indicating whether original words will be preserved and - added to the subword list. Default is false. - :paramtype preserve_original: bool - :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if - this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. - :paramtype split_on_numerics: bool - :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each + :vartype split_on_case_change: bool + :ivar preserve_original: A value indicating whether original words will be preserved and added + to the subword list. Default is false. + :vartype preserve_original: bool + :ivar split_on_numerics: A value indicating whether to split on numbers. 
For example, if this + is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. + :vartype split_on_numerics: bool + :ivar stem_english_possessive: A value indicating whether to remove trailing "'s" for each subword. Default is true. - :paramtype stem_english_possessive: bool - :keyword protected_words: A list of tokens to protect from being delimited. - :paramtype protected_words: list[str] + :vartype stem_english_possessive: bool + :ivar protected_words: A list of tokens to protect from being delimited. + :vartype protected_words: list[str] """ _validation = { @@ -7344,6 +9423,43 @@ def __init__( protected_words: Optional[List[str]] = None, **kwargs ): + """ + :keyword name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :paramtype name: str + :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes + parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is + true. + :paramtype generate_word_parts: bool + :keyword generate_number_parts: A value indicating whether to generate number subwords. Default + is true. + :paramtype generate_number_parts: bool + :keyword catenate_words: A value indicating whether maximum runs of word parts will be + catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default + is false. + :paramtype catenate_words: bool + :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be + catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. + :paramtype catenate_numbers: bool + :keyword catenate_all: A value indicating whether all subword parts will be catenated. For + example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. 
+ :paramtype catenate_all: bool + :keyword split_on_case_change: A value indicating whether to split words on caseChange. For + example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. + :paramtype split_on_case_change: bool + :keyword preserve_original: A value indicating whether original words will be preserved and + added to the subword list. Default is false. + :paramtype preserve_original: bool + :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if + this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. + :paramtype split_on_numerics: bool + :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each + subword. Default is true. + :paramtype stem_english_possessive: bool + :keyword protected_words: A list of tokens to protect from being delimited. + :paramtype protected_words: list[str] + """ super(WordDelimiterTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' # type: str self.generate_word_parts = generate_word_parts diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py index 85a4b01663ec..ce887f96e5b3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py @@ -12,12 +12,12 @@ from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse -from azure.core.pipeline.transport._base import _format_url_section from 
azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. import models as _models +from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -38,7 +38,7 @@ def build_create_or_update_request( x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str] if_match = kwargs.pop('if_match', None) # type: Optional[str] if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - ignore_reset_requirements = kwargs.pop('ignore_reset_requirements', None) # type: Optional[bool] + skip_indexer_reset_requirement_for_cache = kwargs.pop('skip_indexer_reset_requirement_for_cache', None) # type: Optional[bool] prefer = "return=representation" api_version = "2021-04-30-Preview" @@ -54,8 +54,8 @@ def build_create_or_update_request( # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') - if ignore_reset_requirements is not None: - query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("ignore_reset_requirements", ignore_reset_requirements, 'bool') + if skip_indexer_reset_requirement_for_cache is not None: + query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, 'bool') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] @@ -252,7 +252,7 @@ def create_or_update( data_source, # type: "_models.SearchIndexerDataSource" if_match=None, # type: Optional[str] if_none_match=None, # type: Optional[str] - ignore_reset_requirements=None, # type: Optional[bool] + skip_indexer_reset_requirement_for_cache=None, # type: Optional[bool] request_options=None, # type: Optional["_models.RequestOptions"] **kwargs # type: Any ): @@ -269,8 +269,8 @@ def 
create_or_update( :param if_none_match: Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. :type if_none_match: str - :param ignore_reset_requirements: Ignores cache reset requirements. - :type ignore_reset_requirements: bool + :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :type skip_indexer_reset_requirement_for_cache: bool :param request_options: Parameter group. :type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -297,10 +297,11 @@ def create_or_update( x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, - ignore_reset_requirements=ignore_reset_requirements, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -311,7 +312,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -371,7 +372,8 @@ def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -382,7 +384,7 @@ def delete( if 
response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -424,7 +426,8 @@ def get( data_source_name=data_source_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -435,7 +438,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) @@ -483,7 +486,8 @@ def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -494,7 +498,7 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ListDataSourcesResult', pipeline_response) @@ -544,7 +548,8 @@ def 
create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -555,7 +560,7 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py index ae85d03e1d14..6e28e2cbd2ae 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py @@ -12,12 +12,12 @@ from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse -from azure.core.pipeline.transport._base import _format_url_section from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. 
import models as _models +from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -152,8 +152,8 @@ def build_create_or_update_request( x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str] if_match = kwargs.pop('if_match', None) # type: Optional[str] if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] + skip_indexer_reset_requirement_for_cache = kwargs.pop('skip_indexer_reset_requirement_for_cache', None) # type: Optional[bool] disable_cache_reprocessing_change_detection = kwargs.pop('disable_cache_reprocessing_change_detection', None) # type: Optional[bool] - ignore_reset_requirements = kwargs.pop('ignore_reset_requirements', None) # type: Optional[bool] prefer = "return=representation" api_version = "2021-04-30-Preview" @@ -169,10 +169,10 @@ def build_create_or_update_request( # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') + if skip_indexer_reset_requirement_for_cache is not None: + query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, 'bool') if disable_cache_reprocessing_change_detection is not None: query_parameters['disableCacheReprocessingChangeDetection'] = _SERIALIZER.query("disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, 'bool') - if ignore_reset_requirements is not None: - query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("ignore_reset_requirements", ignore_reset_requirements, 'bool') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] @@ -431,7 +431,8 @@ def reset( indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.reset.metadata['url'], - 
)._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -442,7 +443,7 @@ def reset( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -456,7 +457,7 @@ def reset_docs( self, indexer_name, # type: str overwrite=False, # type: Optional[bool] - keys_or_ids=None, # type: Optional["_models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema"] + keys_or_ids=None, # type: Optional["_models.DocumentKeysOrIds"] request_options=None, # type: Optional["_models.RequestOptions"] **kwargs # type: Any ): @@ -469,8 +470,7 @@ def reset_docs( keys or ids in this payload will be queued to be re-ingested. :type overwrite: bool :param keys_or_ids: - :type keys_or_ids: - ~azure.search.documents.indexes.models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema + :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds :param request_options: Parameter group. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -490,7 +490,7 @@ def reset_docs( if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id if keys_or_ids is not None: - json = self._serialize.body(keys_or_ids, 'Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema') + json = self._serialize.body(keys_or_ids, 'DocumentKeysOrIds') else: json = None @@ -501,7 +501,8 @@ def reset_docs( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.reset_docs.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -512,7 +513,7 @@ def reset_docs( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -554,7 +555,8 @@ def run( indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.run.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -565,7 +567,7 @@ def run( if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise 
HttpResponseError(response=response, model=error) if cls: @@ -581,8 +583,8 @@ def create_or_update( indexer, # type: "_models.SearchIndexer" if_match=None, # type: Optional[str] if_none_match=None, # type: Optional[str] + skip_indexer_reset_requirement_for_cache=None, # type: Optional[bool] disable_cache_reprocessing_change_detection=None, # type: Optional[bool] - ignore_reset_requirements=None, # type: Optional[bool] request_options=None, # type: Optional["_models.RequestOptions"] **kwargs # type: Any ): @@ -599,11 +601,11 @@ def create_or_update( :param if_none_match: Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. :type if_none_match: str + :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :type skip_indexer_reset_requirement_for_cache: bool :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change detection. :type disable_cache_reprocessing_change_detection: bool - :param ignore_reset_requirements: Ignores cache reset requirements. - :type ignore_reset_requirements: bool :param request_options: Parameter group. 
:type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -630,11 +632,12 @@ def create_or_update( x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - ignore_reset_requirements=ignore_reset_requirements, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -645,7 +648,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -705,7 +708,8 @@ def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -716,7 +720,7 @@ def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -758,7 +762,8 @@ def get( 
indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -769,7 +774,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexer', pipeline_response) @@ -817,7 +822,8 @@ def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -828,7 +834,7 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ListIndexersResult', pipeline_response) @@ -878,7 +884,8 @@ def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -889,7 +896,7 @@ def create( if response.status_code not in [201]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexer', pipeline_response) @@ -935,7 +942,8 @@ def get_status( indexer_name=indexer_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get_status.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -946,7 +954,7 @@ def get_status( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerStatus', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py index c5a7e11c1930..60c586e32008 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py @@ -13,12 +13,12 @@ from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse -from azure.core.pipeline.transport._base import _format_url_section from azure.core.rest import HttpRequest from 
azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. import models as _models +from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -358,7 +358,8 @@ def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -369,7 +370,7 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndex', pipeline_response) @@ -418,7 +419,8 @@ def prepare_request(next_link=None): select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -433,7 +435,8 @@ def prepare_request(next_link=None): select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=next_link, - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -460,7 +463,7 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -530,7 +533,8 @@ def create_or_update( if_none_match=if_none_match, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -541,7 +545,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -603,7 +607,8 @@ def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -614,7 +619,7 @@ def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -656,7 +661,8 @@ def get( index_name=index_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": 
self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -667,7 +673,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndex', pipeline_response) @@ -713,7 +719,8 @@ def get_statistics( index_name=index_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get_statistics.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -724,7 +731,7 @@ def get_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('GetIndexStatisticsResult', pipeline_response) @@ -778,7 +785,8 @@ def analyze( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.analyze.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -789,7 +797,7 @@ def analyze( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = 
self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AnalyzeResult', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py index c717faa75ed0..8b7124ce5113 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py @@ -17,6 +17,7 @@ from msrest import Serializer from .. import models as _models +from .._vendor import _convert_request if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -89,7 +90,8 @@ def get_service_statistics( request = build_get_service_statistics_request( x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get_service_statistics.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -100,7 +102,7 @@ def get_service_statistics( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ServiceStatistics', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py index 55ca1000e361..811faaa9c8e1 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py @@ -12,16 +12,16 @@ from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse -from azure.core.pipeline.transport._base import _format_url_section from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. import models as _models +from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] @@ -38,8 +38,8 @@ def build_create_or_update_request( x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str] if_match = kwargs.pop('if_match', None) # type: Optional[str] if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] + skip_indexer_reset_requirement_for_cache = kwargs.pop('skip_indexer_reset_requirement_for_cache', None) # type: Optional[bool] disable_cache_reprocessing_change_detection = kwargs.pop('disable_cache_reprocessing_change_detection', None) # type: Optional[bool] - ignore_reset_requirements = kwargs.pop('ignore_reset_requirements', None) # type: Optional[bool] prefer = "return=representation" api_version = "2021-04-30-Preview" @@ -55,10 
+55,10 @@ def build_create_or_update_request( # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') + if skip_indexer_reset_requirement_for_cache is not None: + query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, 'bool') if disable_cache_reprocessing_change_detection is not None: query_parameters['disableCacheReprocessingChangeDetection'] = _SERIALIZER.query("disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, 'bool') - if ignore_reset_requirements is not None: - query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("ignore_reset_requirements", ignore_reset_requirements, 'bool') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] @@ -225,6 +225,45 @@ def build_create_request( **kwargs ) + +def build_reset_skills_request( + skillset_name, # type: str + **kwargs # type: Any +): + # type: (...) 
-> HttpRequest + content_type = kwargs.pop('content_type', None) # type: Optional[str] + x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str] + + api_version = "2021-04-30-Preview" + accept = "application/json" + # Construct URL + url = kwargs.pop("template_url", '/skillsets(\'{skillsetName}\')/search.resetskills') + path_format_arguments = { + "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, 'str'), + } + + url = _format_url_section(url, **path_format_arguments) + + # Construct parameters + query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] + query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] + if x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, 'str') + if content_type is not None: + header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') + header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') + + return HttpRequest( + method="POST", + url=url, + params=query_parameters, + headers=header_parameters, + **kwargs + ) + # fmt: on class SkillsetsOperations(object): """SkillsetsOperations operations. @@ -255,8 +294,8 @@ def create_or_update( skillset, # type: "_models.SearchIndexerSkillset" if_match=None, # type: Optional[str] if_none_match=None, # type: Optional[str] + skip_indexer_reset_requirement_for_cache=None, # type: Optional[bool] disable_cache_reprocessing_change_detection=None, # type: Optional[bool] - ignore_reset_requirements=None, # type: Optional[bool] request_options=None, # type: Optional["_models.RequestOptions"] **kwargs # type: Any ): @@ -274,11 +313,11 @@ def create_or_update( :param if_none_match: Defines the If-None-Match condition. 
The operation will be performed only if the ETag on the server does not match this value. :type if_none_match: str + :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :type skip_indexer_reset_requirement_for_cache: bool :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change detection. :type disable_cache_reprocessing_change_detection: bool - :param ignore_reset_requirements: Ignores cache reset requirements. - :type ignore_reset_requirements: bool :param request_options: Parameter group. :type request_options: ~azure.search.documents.indexes.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response @@ -305,11 +344,12 @@ def create_or_update( x_ms_client_request_id=_x_ms_client_request_id, if_match=if_match, if_none_match=if_none_match, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - ignore_reset_requirements=ignore_reset_requirements, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -320,7 +360,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -380,7 +420,8 @@ def delete( if_match=if_match, if_none_match=if_none_match, template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) 
path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -391,7 +432,7 @@ def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -433,7 +474,8 @@ def get( skillset_name=skillset_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -444,7 +486,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) @@ -492,7 +534,8 @@ def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -503,7 +546,7 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, 
pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ListSkillsetsResult', pipeline_response) @@ -553,7 +596,8 @@ def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -564,7 +608,7 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) @@ -576,3 +620,66 @@ def create( create.metadata = {'url': '/skillsets'} # type: ignore + + @distributed_trace + def reset_skills( + self, + skillset_name, # type: str + skill_names=None, # type: Optional[List[str]] + request_options=None, # type: Optional["_models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset to reset. + :type skillset_name: str + :param skill_names: the names of skills to be reset. + :type skill_names: list[str] + :param request_options: Parameter group. 
+ :type request_options: ~azure.search.documents.indexes.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + _skill_names = _models.SkillNames(skill_names=skill_names) + json = self._serialize.body(_skill_names, 'SkillNames') + + request = build_reset_skills_request( + skillset_name=skillset_name, + content_type=content_type, + x_ms_client_request_id=_x_ms_client_request_id, + json=json, + template_url=self.reset_skills.metadata['url'], + ) + request = _convert_request(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + request.url = self._client.format_url(request.url, **path_format_arguments) + + pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + reset_skills.metadata = {'url': '/skillsets(\'{skillsetName}\')/search.resetskills'} # type: ignore + diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py index 0ce0864491c3..620646412a94 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py @@ -12,12 +12,12 @@ from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse -from azure.core.pipeline.transport._base import _format_url_section from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. import models as _models +from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -293,7 +293,8 @@ def create_or_update( if_none_match=if_none_match, json=json, template_url=self.create_or_update.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -304,7 +305,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if response.status_code == 200: @@ -364,7 +365,8 @@ def delete( if_match=if_match, if_none_match=if_none_match, 
template_url=self.delete.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -375,7 +377,7 @@ def delete( if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) if cls: @@ -417,7 +419,8 @@ def get( synonym_map_name=synonym_map_name, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.get.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -428,7 +431,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SynonymMap', pipeline_response) @@ -476,7 +479,8 @@ def list( select=select, x_ms_client_request_id=_x_ms_client_request_id, template_url=self.list.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -487,7 +491,7 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('ListSynonymMapsResult', pipeline_response) @@ -537,7 +541,8 @@ def create( x_ms_client_request_id=_x_ms_client_request_id, json=json, template_url=self.create.metadata['url'], - )._to_pipeline_transport_request() + ) + request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -548,7 +553,7 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.SearchError, response) + error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SynonymMap', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py index 27a083e917f6..c7e55716c554 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py @@ -114,6 +114,11 @@ def create_or_update_indexer(self, indexer, **kwargs): :param indexer: The definition of the indexer to create or update. :type indexer: ~azure.search.documents.indexes.models.SearchIndexer + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. 
+ :paramtype disable_cache_reprocessing_change_detection: bool :return: The created IndexSearchIndexerer :rtype: ~azure.search.documents.indexes.models.SearchIndexer """ @@ -330,9 +335,12 @@ def create_or_update_data_source_connection(self, data_source_connection, **kwar :type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :paramtype skip_indexer_reset_requirement_for_cache: bool :return: The created SearchIndexerDataSourceConnection :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection """ + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( data_source_connection, @@ -584,6 +592,11 @@ def create_or_update_skillset(self, skillset, **kwargs): :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. 
+ :paramtype disable_cache_reprocessing_change_detection: bool :return: The created or updated SearchIndexerSkillset :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py index d8171269bcf6..d0e589f9a39e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py @@ -115,6 +115,11 @@ async def create_or_update_indexer(self, indexer, **kwargs): :param indexer: The definition of the indexer to create or update. :type indexer: ~azure.search.documents.indexes.models.SearchIndexer + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. + :paramtype disable_cache_reprocessing_change_detection: bool :return: The created SearchIndexer :rtype: ~azure.search.documents.indexes.models.SearchIndexer """ @@ -324,6 +329,8 @@ async def create_or_update_data_source_connection( :type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. 
+ :type skip_indexer_reset_requirement_for_cache: bool :return: The created SearchIndexerDataSourceConnection :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection """ @@ -571,6 +578,11 @@ async def create_or_update_skillset(self, skillset, **kwargs): :type skillset: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset` :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. + :paramtype disable_cache_reprocessing_change_detection: bool :return: The created or updated SearchIndexerSkillset :rtype: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset` diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py index b1d2c678d771..c93b4ac66b9c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py @@ -25,32 +25,32 @@ # -------------------------------------------------------------------------- from .._generated.models import ( - Answers, AnswerResult, AutocompleteMode, - Captions, CaptionResult, IndexAction, IndexingResult, + QueryAnswerType, + QueryCaptionType, QueryLanguage, + QuerySpellerType, QueryType, SearchMode, - Speller, ) from .._search_client import odata __all__ = ( - "Answers", "AnswerResult", "AutocompleteMode", - "Captions", "CaptionResult", "IndexAction", "IndexingResult", "odata", + "QueryAnswerType", + "QueryCaptionType", "QueryLanguage", + "QuerySpellerType", "QueryType", "SearchMode", - "Speller", ) diff --git 
a/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py b/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py index 5b14f39da581..b52f21b5fba3 100644 --- a/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py +++ b/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py @@ -35,7 +35,7 @@ async def speller(): client = SearchClient(endpoint=endpoint, index_name=index_name, credential=credential) - results = await client.search(search_text="luxucy", query_language="en-us", speller="lexicon") + results = await client.search(search_text="luxucy", query_language="en-us", query_speller="lexicon") async for result in results: print("{}\n{}\n)".format(result["HotelId"], result["HotelName"])) diff --git a/sdk/search/azure-search-documents/samples/sample_semantic_search.py b/sdk/search/azure-search-documents/samples/sample_semantic_search.py index 6713b25fa4d3..9acdb6e1f6bb 100644 --- a/sdk/search/azure-search-documents/samples/sample_semantic_search.py +++ b/sdk/search/azure-search-documents/samples/sample_semantic_search.py @@ -34,7 +34,7 @@ def speller(): client = SearchClient(endpoint=endpoint, index_name=index_name, credential=credential) - results = list(client.search(search_text="luxucy", query_language="en-us", speller="lexicon")) + results = list(client.search(search_text="luxucy", query_language="en-us", query_speller="lexicon")) for result in results: print("{}\n{}\n)".format(result["HotelId"], result["HotelName"])) diff --git a/sdk/search/azure-search-documents/setup.py b/sdk/search/azure-search-documents/setup.py index 446aae1aa38c..6c3c34cdaae4 100644 --- a/sdk/search/azure-search-documents/setup.py +++ b/sdk/search/azure-search-documents/setup.py @@ -67,6 +67,7 @@ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 
3.10', 'License :: OSI Approved :: MIT License', ], zip_safe=False, From bf0e40e5d9db18f77c75178c820668d01813f4b0 Mon Sep 17 00:00:00 2001 From: Krista Pratico Date: Fri, 1 Oct 2021 11:17:58 -0700 Subject: [PATCH 05/10] [formrecognizer] updating docs with aka.ms links (#21012) * add service links * update with the aka.ms links --- sdk/formrecognizer/azure-ai-formrecognizer/README.md | 4 ++++ .../ai/formrecognizer/_document_analysis_client.py | 10 ++++------ .../aio/_document_analysis_client_async.py | 10 ++++------ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/README.md index a66d8ea808e3..7ca9509f14c5 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/README.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/README.md @@ -160,6 +160,7 @@ Use the `model` parameter to select the type of model for analysis. |"{custom-model-id}"| Text extraction, selection marks, tables, labeled fields and values from your custom documents Sample code snippets are provided to illustrate using a DocumentAnalysisClient [here](#examples "Examples"). +More information about analyzing documents, including supported features and locales can be found in the [service documentation][fr-models]. ### DocumentModelAdministrationClient `DocumentModelAdministrationClient` provides operations for: @@ -532,6 +533,7 @@ except ResourceNotFoundError: ### General Form Recognizer client library will raise exceptions defined in [Azure Core][azure_core_exceptions]. +Error codes and messages raised by the Form Recognizer service can be found in the [service documentation][fr-errors]. 
### Logging This library uses the standard @@ -584,6 +586,8 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con [fr-labeling-tool]: https://aka.ms/azsdk/formrecognizer/labelingtool [fr-build-model]: https://aka.ms/azsdk/formrecognizer/buildmodel [fr-build-training-set]: https://aka.ms/azsdk/formrecognizer/buildtrainingset +[fr-models]: https://aka.ms/azsdk/formrecognizer/models +[fr-errors]: https://aka.ms/azsdk/formrecognizer/errors [azure_core_ref_docs]: https://aka.ms/azsdk/python/core/docs [azure_core_exceptions]: https://aka.ms/azsdk/python/core/docs#module-azure.core.exceptions diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py index b71b519195e1..9e625e4693db 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py @@ -78,9 +78,8 @@ def begin_analyze_document(self, model, document, **kwargs): """Analyze field text and semantic values from a given document. :param str model: A unique model identifier can be passed in as a string. - Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are: - "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard", - "prebuilt-document", "prebuilt-layout". + Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs supported + can be found here: https://aka.ms/azsdk/formrecognizer/models :param document: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes. :type document: bytes or IO[bytes] :keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). 
Input the page numbers @@ -134,9 +133,8 @@ def begin_analyze_document_from_url(self, model, document_url, **kwargs): The input must be the location (URL) of the document to be analyzed. :param str model: A unique model identifier can be passed in as a string. - Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are: - "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard", - "prebuilt-document", "prebuilt-layout". + Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs supported + can be found here: https://aka.ms/azsdk/formrecognizer/models :param str document_url: The URL of the document to analyze. The input must be a valid, encoded, and publicly accessible URL of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP. :keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py index f5ebaa29eb3f..25b71f5cfc16 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py @@ -84,9 +84,8 @@ async def begin_analyze_document( """Analyze field text and semantic values from a given document. :param str model: A unique model identifier can be passed in as a string. - Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are: - "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard", - "prebuilt-document", "prebuilt-layout". + Use this to specify the custom model ID or prebuilt model ID. 
Prebuilt model IDs supported + can be found here: https://aka.ms/azsdk/formrecognizer/models :param document: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes. :type document: bytes or IO[bytes] :keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers @@ -141,9 +140,8 @@ async def begin_analyze_document_from_url( The input must be the location (URL) of the document to be analyzed. :param str model: A unique model identifier can be passed in as a string. - Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are: - "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard", - "prebuilt-document", "prebuilt-layout". + Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs supported + can be found here: https://aka.ms/azsdk/formrecognizer/models :param str document_url: The URL of the document to analyze. The input must be a valid, encoded, and publicly accessible URL of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP. :keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). 
Input the page numbers From cfb945de34b27c37177d7c038a40d3c53459afa2 Mon Sep 17 00:00:00 2001 From: catalinaperalta Date: Fri, 1 Oct 2021 14:57:48 -0400 Subject: [PATCH 06/10] [formrecognizer] Update changelog for release (#20963) * update changelog for release * include snippet about new clients --- sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md index f05f6ba48035..457b631d2f65 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md @@ -1,9 +1,11 @@ # Release History -## 3.2.0b1 (Unreleased) +## 3.2.0b1 (2021-10-05) This version of the SDK defaults to the latest supported API version, which currently is v2021-09-30-preview. +> Note: Starting with version 2021-09-30-preview, a new set of clients were introduced to leverage the newest features of the Form Recognizer service. Please see the Migration Guide for detailed instructions on how to update application code from client library version 3.1.X or lower to the latest version. Also, please refer to the README for more information about the library. + ### Features Added - Added new `DocumentAnalysisClient` with `begin_analyze_document` and `begin_analyze_document_from_url` methods. Use these methods with the latest Form Recognizer API version to analyze documents, with prebuilt and custom models. @@ -13,10 +15,6 @@ API version to analyze documents, with prebuilt and custom models. - Added samples using the `DocumentAnalysisClient` and `DocumentModelAdministrationClient` under `/samples/v3.2-beta`. - Added `DocumentAnalysisApiVersion` to be used with `DocumentAnalysisClient` and `DocumentModelAdministrationClient`. -### Breaking Changes - -### Bugs Fixed - ### Other Changes - Python 3.5 is no longer supported in this release. 
From 6b336540da0cfb694725b37e91cb206202b514d9 Mon Sep 17 00:00:00 2001 From: swathipil <76007337+swathipil@users.noreply.github.com> Date: Fri, 1 Oct 2021 13:04:56 -0700 Subject: [PATCH 07/10] [SchemaRegistry] rename namespace (#20965) fixes: #20711 --- .../CHANGELOG.md | 19 ++++++++++--------- .../serializer/avroserializer/__init__.py | 4 ++-- .../_schema_registry_avro_serializer.py | 4 ++-- .../samples/avro_serializer.py | 4 ++-- .../samples/eventhub_receive_integration.py | 8 ++++---- .../samples/eventhub_send_integration.py | 8 ++++---- ...serializer_with_auto_register_schemas.yaml | 2 +- ...ializer_without_auto_register_schemas.yaml | 2 +- .../tests/test_avro_serializer.py | 8 ++++---- .../schemaregistry/serializer/__init__.py | 1 + 10 files changed, 31 insertions(+), 29 deletions(-) diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md b/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md index a24aa8e035c3..7245b36e68ab 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md @@ -4,19 +4,20 @@ ### Features Added -- `auto_register_schemas` keyword argument has been added to `SchemaRegistryAvroSerializer`, which will allow for automatically registering schemas passed in to the `serialize`. -- `value` parameter in `serialize` on `SchemaRegistryAvroSerializer` takes type `Mapping` rather than `Dict`. +- `auto_register_schemas` keyword argument has been added to `AvroSerializer`, which will allow for automatically registering schemas passed in to the `serialize`. +- `value` parameter in `serialize` on `AvroSerializer` takes type `Mapping` rather than `Dict`. ### Breaking Changes -- `schema_registry` parameter in the `SchemaRegistryAvroSerializer` constructor has been renamed `client`. -- `schema_group` parameter in the `SchemaRegistryAvroSerializer` constructor has been renamed `group_name`. 
-- `data` parameter in the `serialize` and `deserialize` methods on `SchemaRegistryAvroSerializer` has been renamed `value`. -- `schema` parameter in the `serialize` method on `SchemaRegistryAvroSerializer` no longer accepts argument of type `bytes`. -- `SchemaRegistryAvroSerializer` constructor no longer takes in the `codec` keyword argument. +- `SchemaRegistryAvroSerializer` has been renamed `AvroSerializer`. +- `schema_registry` parameter in the `AvroSerializer` constructor has been renamed `client`. +- `schema_group` parameter in the `AvroSerializer` constructor has been renamed `group_name`. +- `data` parameter in the `serialize` and `deserialize` methods on `AvroSerializer` has been renamed `value`. +- `schema` parameter in the `serialize` method on `AvroSerializer` no longer accepts argument of type `bytes`. +- `AvroSerializer` constructor no longer takes in the `codec` keyword argument. - The following positional arguments are now required keyword arguments: - - `client` and `group_name` in `SchemaRegistryAvroSerializer` constructor - - `schema` in `serialize` on `SchemaRegistryAvroSerializer` + - `client` and `group_name` in `AvroSerializer` constructor + - `schema` in `serialize` on `AvroSerializer` ### Bugs Fixed diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py index fe999769d03e..c9a4c0074933 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py @@ -27,8 +27,8 @@ __version__ = VERSION -from ._schema_registry_avro_serializer import SchemaRegistryAvroSerializer +from ._schema_registry_avro_serializer import AvroSerializer __all__ = [ - "SchemaRegistryAvroSerializer" + 
"AvroSerializer" ] diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py index f316fa9d1f07..cbd6c1aefede 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py @@ -35,9 +35,9 @@ from ._avro_serializer import AvroObjectSerializer -class SchemaRegistryAvroSerializer(object): +class AvroSerializer(object): """ - SchemaRegistryAvroSerializer provides the ability to serialize and deserialize data according + AvroSerializer provides the ability to serialize and deserialize data according to the given avro schema. It would automatically register, get and cache the schema. :keyword client: Required. 
The schema registry client diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py index a31e4a7b88ee..b93ebd71d450 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py @@ -27,7 +27,7 @@ from azure.identity import ClientSecretCredential from azure.schemaregistry import SchemaRegistryClient -from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer +from azure.schemaregistry.serializer.avroserializer import AvroSerializer TENANT_ID=os.environ['AZURE_TENANT_ID'] CLIENT_ID=os.environ['AZURE_CLIENT_ID'] @@ -80,7 +80,7 @@ def deserialize(serializer, bytes_payload): if __name__ == '__main__': schema_registry = SchemaRegistryClient(endpoint=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE, credential=token_credential) - serializer = SchemaRegistryAvroSerializer(client=schema_registry, group_name=GROUP_NAME, auto_register_schemas=True) + serializer = AvroSerializer(client=schema_registry, group_name=GROUP_NAME, auto_register_schemas=True) bytes_data_ben, bytes_data_alice = serialize(serializer) dict_data_ben = deserialize(serializer, bytes_data_ben) dict_data_alice = deserialize(serializer, bytes_data_alice) diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py index 3cbf949adf00..5bcc2c803337 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py @@ -6,7 +6,7 @@ # -------------------------------------------------------------------------------------------- """ -Examples to show receiving events from 
EventHub with SchemaRegistryAvroSerializer integrated for data deserialization. +Examples to show receiving events from EventHub with AvroSerializer integrated for data deserialization. """ # pylint: disable=C0111 @@ -14,7 +14,7 @@ from azure.eventhub import EventHubConsumerClient from azure.identity import DefaultAzureCredential from azure.schemaregistry import SchemaRegistryClient -from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer +from azure.schemaregistry.serializer.avroserializer import AvroSerializer EVENTHUB_CONNECTION_STR = os.environ['EVENT_HUB_CONN_STR'] EVENTHUB_NAME = os.environ['EVENT_HUB_NAME'] @@ -44,9 +44,9 @@ def on_event(partition_context, event): ) -# create a SchemaRegistryAvroSerializer instance +# create a AvroSerializer instance # TODO: after 'azure-schemaregistry==1.0.0b3' is released, update 'endpoint' to 'fully_qualified_namespace' -avro_serializer = SchemaRegistryAvroSerializer( +avro_serializer = AvroSerializer( client=SchemaRegistryClient( endpoint=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE, credential=DefaultAzureCredential() diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py index d184e87d89a8..2821272efe29 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py @@ -6,7 +6,7 @@ # -------------------------------------------------------------------------------------------- """ -Examples to show sending event to EventHub with SchemaRegistryAvroSerializer integrated for data serialization. +Examples to show sending event to EventHub with AvroSerializer integrated for data serialization. 
""" # pylint: disable=C0111 @@ -15,7 +15,7 @@ from azure.eventhub import EventHubProducerClient, EventData from azure.identity import DefaultAzureCredential from azure.schemaregistry import SchemaRegistryClient -from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer +from azure.schemaregistry.serializer.avroserializer import AvroSerializer EVENTHUB_CONNECTION_STR = os.environ['EVENT_HUB_CONN_STR'] EVENTHUB_NAME = os.environ['EVENT_HUB_NAME'] @@ -58,9 +58,9 @@ def send_event_data_batch(producer, serializer): ) -# create a SchemaRegistryAvroSerializer instance +# create a AvroSerializer instance # TODO: after 'azure-schemaregistry==1.0.0b3' is released, update 'endpoint' to 'fully_qualified_namespace' -avro_serializer = SchemaRegistryAvroSerializer( +avro_serializer = AvroSerializer( client=SchemaRegistryClient( endpoint=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE, credential=DefaultAzureCredential() diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml index 90dc88aa96ec..cfd2920100e2 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml @@ -28,7 +28,7 @@ interactions: content-type: - application/json date: - - Tue, 28 Sep 2021 22:27:25 GMT + - Thu, 30 Sep 2021 02:05:53 GMT location: - https://swathip-test-eventhubs.servicebus.windows.net:443/$schemagroups/fakegroup/schemas/example.avro.User/versions/1?api-version=2017-04 server: diff --git 
a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml index 7dce4b62fbfe..0feb5392eba4 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml @@ -28,7 +28,7 @@ interactions: content-type: - application/json date: - - Tue, 28 Sep 2021 22:27:26 GMT + - Thu, 30 Sep 2021 02:05:54 GMT location: - https://swathip-test-eventhubs.servicebus.windows.net:443/$schemagroups/fakegroup/schemas/example.avro.User/versions/1?api-version=2017-04 server: diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py index bf2bca41907f..f392d8431917 100644 --- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py +++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py @@ -25,7 +25,7 @@ from io import BytesIO from azure.schemaregistry import SchemaRegistryClient -from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer +from azure.schemaregistry.serializer.avroserializer import AvroSerializer from azure.schemaregistry.serializer.avroserializer._avro_serializer import AvroObjectSerializer from azure.identity import ClientSecretCredential from azure.core.exceptions import ClientAuthenticationError, ServiceRequestError, HttpResponseError @@ -34,7 +34,7 @@ SchemaRegistryPowerShellPreparer = functools.partial(PowerShellPreparer, 
"schemaregistry", schemaregistry_fully_qualified_namespace="fake_resource.servicebus.windows.net/", schemaregistry_group="fakegroup") -class SchemaRegistryAvroSerializerTests(AzureTestCase): +class AvroSerializerTests(AzureTestCase): def test_raw_avro_serializer(self): schema_str = """{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string"},{"name":"favorite_number","type":["int","null"]},{"name":"favorite_color","type":["string","null"]}]}""" @@ -78,7 +78,7 @@ def test_raw_avro_serializer_negative(self): def test_basic_sr_avro_serializer_with_auto_register_schemas(self, schemaregistry_fully_qualified_namespace, schemaregistry_group, **kwargs): # TODO: AFTER RELEASING azure-schemaregistry=1.0.0b3, UPDATE 'endpoint' to 'fully_qualified_namespace' sr_client = self.create_basic_client(SchemaRegistryClient, endpoint=schemaregistry_fully_qualified_namespace) - sr_avro_serializer = SchemaRegistryAvroSerializer(client=sr_client, group_name=schemaregistry_group, auto_register_schemas=True) + sr_avro_serializer = AvroSerializer(client=sr_client, group_name=schemaregistry_group, auto_register_schemas=True) schema_str = """{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string"},{"name":"favorite_number","type":["int","null"]},{"name":"favorite_color","type":["string","null"]}]}""" schema = avro.schema.parse(schema_str) @@ -103,7 +103,7 @@ def test_basic_sr_avro_serializer_with_auto_register_schemas(self, schemaregistr def test_basic_sr_avro_serializer_without_auto_register_schemas(self, schemaregistry_fully_qualified_namespace, schemaregistry_group, **kwargs): # TODO: AFTER RELEASING azure-schemaregistry=1.0.0b3, UPDATE 'endpoint' to 'fully_qualified_namespace' sr_client = self.create_basic_client(SchemaRegistryClient, endpoint=schemaregistry_fully_qualified_namespace) - sr_avro_serializer = SchemaRegistryAvroSerializer(client=sr_client, group_name=schemaregistry_group) + sr_avro_serializer 
= AvroSerializer(client=sr_client, group_name=schemaregistry_group) schema_str = """{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string"},{"name":"favorite_number","type":["int","null"]},{"name":"favorite_color","type":["string","null"]}]}""" schema = avro.schema.parse(schema_str) diff --git a/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py b/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py index c36aaed14908..80f86cb969ec 100644 --- a/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py +++ b/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py @@ -23,3 +23,4 @@ # IN THE SOFTWARE. # # -------------------------------------------------------------------------- +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore From ca6dba429f48214ac26b09d910426d2e1646fa30 Mon Sep 17 00:00:00 2001 From: iscai-msft <43154838+iscai-msft@users.noreply.github.com> Date: Fri, 1 Oct 2021 16:12:25 -0400 Subject: [PATCH 08/10] Rebase after merge. 
(#21014) Co-authored-by: Travis Prescott --- .../aio/operations/_documents_operations.py | 18 +++++++++--------- .../operations/_documents_operations.py | 18 +++++++++--------- .../aio/operations/_data_sources_operations.py | 10 +++++----- .../aio/operations/_indexers_operations.py | 18 +++++++++--------- .../aio/operations/_indexes_operations.py | 12 ++++++------ .../operations/_search_client_operations.py | 2 +- .../aio/operations/_skillsets_operations.py | 12 ++++++------ .../aio/operations/_synonym_maps_operations.py | 10 +++++----- .../operations/_data_sources_operations.py | 10 +++++----- .../operations/_indexers_operations.py | 18 +++++++++--------- .../operations/_indexes_operations.py | 12 ++++++------ .../operations/_search_client_operations.py | 2 +- .../operations/_skillsets_operations.py | 12 ++++++------ .../operations/_synonym_maps_operations.py | 10 +++++----- sdk/search/azure-search-documents/setup.py | 2 +- shared_requirements.txt | 2 +- 16 files changed, 84 insertions(+), 84 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py index f80a00395d23..faae528b930e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py @@ -80,7 +80,7 @@ async def count( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -212,7 +212,7 @@ async def search_get( } request.url = 
self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -274,7 +274,7 @@ async def search_post( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -337,7 +337,7 @@ async def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -432,7 +432,7 @@ async def suggest_get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -494,7 +494,7 @@ async def suggest_post( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -557,7 +557,7 @@ 
async def index( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 207]: @@ -652,7 +652,7 @@ async def autocomplete_get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -714,7 +714,7 @@ async def autocomplete_post( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py index 3bcd60f1e51d..be94d149dd8e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py @@ -510,7 +510,7 @@ def count( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, 
**kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -643,7 +643,7 @@ def search_get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -706,7 +706,7 @@ def search_post( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -770,7 +770,7 @@ def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -866,7 +866,7 @@ def suggest_get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -929,7 +929,7 @@ def suggest_post( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response 
if response.status_code not in [200]: @@ -993,7 +993,7 @@ def index( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 207]: @@ -1089,7 +1089,7 @@ def autocomplete_get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -1152,7 +1152,7 @@ def autocomplete_post( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py index 42ad6b8b96d3..b2f776ae8c12 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py @@ -105,7 +105,7 @@ async def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, 
_return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -176,7 +176,7 @@ async def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -229,7 +229,7 @@ async def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -288,7 +288,7 @@ async def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -349,7 +349,7 @@ async def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py index bafbd1894276..fd8df28bdb71 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py @@ -83,7 +83,7 @@ async def reset( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: @@ -152,7 +152,7 @@ async def reset_docs( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: @@ -205,7 +205,7 @@ async def run( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202]: @@ -285,7 +285,7 @@ async def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if 
response.status_code not in [200, 201]: @@ -356,7 +356,7 @@ async def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -409,7 +409,7 @@ async def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -468,7 +468,7 @@ async def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -529,7 +529,7 @@ async def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: @@ -586,7 +586,7 @@ async def get_status( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = 
pipeline_response.http_response if response.status_code not in [200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py index 7297cec7b06d..eb788bd34818 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py @@ -89,7 +89,7 @@ async def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: @@ -263,7 +263,7 @@ async def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -336,7 +336,7 @@ async def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -389,7 +389,7 @@ async def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, 
stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -446,7 +446,7 @@ async def get_statistics( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -511,7 +511,7 @@ async def analyze( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py index ed1c90f5d34f..1ae2ee9e4c00 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py @@ -59,7 +59,7 @@ async def get_service_statistics( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in 
[200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py index 900829d43761..222b83a7461d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py @@ -111,7 +111,7 @@ async def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -182,7 +182,7 @@ async def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -235,7 +235,7 @@ async def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -294,7 +294,7 @@ async def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) 
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -355,7 +355,7 @@ async def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: @@ -421,7 +421,7 @@ async def reset_skills( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py index 655129db1551..de86d3288d0e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py @@ -101,7 +101,7 @@ async def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -172,7 +172,7 @@ async def delete( } request.url = 
self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -225,7 +225,7 @@ async def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -284,7 +284,7 @@ async def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -345,7 +345,7 @@ async def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py index ce887f96e5b3..f0fca120b0d5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py @@ -307,7 +307,7 @@ def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -379,7 +379,7 @@ def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -433,7 +433,7 @@ def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -493,7 +493,7 @@ def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -555,7 +555,7 @@ def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = 
pipeline_response.http_response if response.status_code not in [201]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py index 6e28e2cbd2ae..30a3129eec29 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py @@ -438,7 +438,7 @@ def reset( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: @@ -508,7 +508,7 @@ def reset_docs( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: @@ -562,7 +562,7 @@ def run( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202]: @@ -643,7 +643,7 @@ def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -715,7 +715,7 @@ def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -769,7 +769,7 @@ def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -829,7 +829,7 @@ def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -891,7 +891,7 @@ def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: @@ -949,7 +949,7 @@ def get_status( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) 
response = pipeline_response.http_response if response.status_code not in [200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py index 60c586e32008..fceea50c44a2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py @@ -365,7 +365,7 @@ def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: @@ -540,7 +540,7 @@ def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -614,7 +614,7 @@ def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -668,7 +668,7 @@ def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -726,7 +726,7 @@ def get_statistics( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -792,7 +792,7 @@ def analyze( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py index 8b7124ce5113..48151d99bb06 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py @@ -97,7 +97,7 @@ def get_service_statistics( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py index 811faaa9c8e1..5c212d7074c0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py @@ -355,7 +355,7 @@ def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -427,7 +427,7 @@ def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204, 404]: @@ -481,7 +481,7 @@ def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -541,7 +541,7 @@ def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -603,7 +603,7 @@ def create( } request.url = 
self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: @@ -670,7 +670,7 @@ def reset_skills( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py index 620646412a94..fd1190e573c3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py @@ -300,7 +300,7 @@ def create_or_update( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: @@ -372,7 +372,7 @@ def delete( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = 
pipeline_response.http_response if response.status_code not in [204, 404]: @@ -426,7 +426,7 @@ def get( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -486,7 +486,7 @@ def list( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: @@ -548,7 +548,7 @@ def create( } request.url = self._client.format_url(request.url, **path_format_arguments) - pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: diff --git a/sdk/search/azure-search-documents/setup.py b/sdk/search/azure-search-documents/setup.py index 6c3c34cdaae4..bcaace8b39ba 100644 --- a/sdk/search/azure-search-documents/setup.py +++ b/sdk/search/azure-search-documents/setup.py @@ -79,7 +79,7 @@ 'azure.search', ]), install_requires=[ - "azure-core<2.0.0,>=1.18.0", + "azure-core<2.0.0,>=1.19.0", "msrest>=0.6.21", "azure-common~=1.1", "typing-extensions" diff --git a/shared_requirements.txt b/shared_requirements.txt index 66d9e014b64e..3c9254069d21 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -153,7 +153,7 @@ backports.functools-lru-cache>=1.6.4 #override azure-ai-textanalytics azure-core<2.0.0,>=1.14.0 #override azure-ai-language-questionanswering azure-core<2.0.0,>=1.19.0 
#override azure-ai-language-questionanswering msrest>=0.6.21 -#override azure-search-documents azure-core<2.0.0,>=1.18.0 +#override azure-search-documents azure-core<2.0.0,>=1.19.0 #override azure-ai-formrecognizer msrest>=0.6.21 #override azure-ai-formrecognizer azure-core<2.0.0,>=1.13.0 #override azure-storage-blob azure-core<2.0.0,>=1.10.0 From 14f907633841ee6f614b79b37d88b9d2fef8332b Mon Sep 17 00:00:00 2001 From: catalinaperalta Date: Fri, 1 Oct 2021 16:16:04 -0400 Subject: [PATCH 09/10] [formrecognizer] Add migration guide (#20960) * add migration guide * title fix * add more examples * update wording * some review feedback and fixes * more fixes * additional doc fixes * add full samples to guide * improve content table, add more info and change subheader * reorganize, add more info about model ids * fix links * fix relative links * fix spelling errors, add Mari's feedback * review feedback from Krista * grammar * add manage models section * more feedback --- .../MIGRATION_GUIDE.md | 651 ++++++++++++++++++ 1 file changed, 651 insertions(+) create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md b/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md new file mode 100644 index 000000000000..340857f86d71 --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md @@ -0,0 +1,651 @@ +# Guide for migrating azure-ai-formrecognizer to version 3.2.x from versions 3.1.x and below + +This guide is intended to assist in the migration to `azure-ai-formrecognizer (3.2.x)` from versions `3.1.x` and below. It will focus on side-by-side comparisons for similar operations between versions. Please note that version `3.2.0b1` will be used for comparison with `3.1.2`. + +Familiarity with `azure-ai-formrecognizer (3.1.x and below)` package is assumed. 
For those new to the Azure Form Recognizer client library for Python, please refer to the [README][readme] rather than this guide. + +## Table of Contents +- [Migration benefits](#migration-benefits) +- [Important changes](#important-changes) + - [Client usage](#client-usage) + - [Analyzing documents](#analyzing-documents) + - [Analyzing a document with a custom model](#analyzing-a-document-with-a-custom-model) + - [Training a custom model](#training-a-custom-model) + - [Manage models](#manage-models) +- [Additional samples](#additional-samples) + +## Migration benefits + +A natural question to ask when considering whether to adopt a new version of the library is what the benefits of doing so would be. As Azure Form Recognizer has matured and been embraced by a more diverse group of developers, we have been focused on learning the patterns and practices to best support developer productivity and add value to our customers. + +There are many benefits to using the new design of the `azure-ai-formrecognizer (3.2.x)` library. This new version of the library introduces two new clients, `DocumentAnalysisClient` and `DocumentModelAdministrationClient`, with unified methods for analyzing documents and provides support for the new features added by the service in API version `2021-09-30-preview` and later. + +New features provided by the `DocumentAnalysisClient` include one consolidated method for analyzing document layout, a general prebuilt document model type, along with the same prebuilt models that were included previously (receipts, invoices, business cards, identity documents), and custom models. Moreover, the models introduced in the latest version of the library, such as `AnalyzeResult`, remove hierarchical dependencies between document elements and move them to a more top-level and easily accessible position.
The service has further improved how to define where elements are located on documents by moving towards `BoundingRegion` definitions allowing for cross-page elements. Document element fields are returned with more information, such as content and spans. + +When using the `DocumentModelAdministrationClient` to build, compose, or copy models, users can now assign their own model IDs and specify a description. Listing models on the administration client now includes both prebuilt and custom models. When using `get_model()`, users can get the field schema (field names and types that the model can extract) for the model they specified, including for prebuilt models. This client also provides functions for getting information from model operations. + +The below table describes the relationship of each client and its supported API version(s): + +|API version|Supported clients +|-|- +|2021-09-30-preview | DocumentAnalysisClient and DocumentModelAdministrationClient +|2.1 | FormRecognizerClient and FormTrainingClient +|2.0 | FormRecognizerClient and FormTrainingClient + +Please refer to the [README][readme] for more information on these new clients. + +## Important changes + +### Client usage + +We continue to support API key and AAD authentication methods when creating the clients. Below are the differences between the two versions: + +- In `3.2.x`, we have added `DocumentAnalysisClient` and `DocumentModelAdministrationClient` which support API version `2021-09-30-preview` and later. +- `FormRecognizerClient` and `FormTrainingClient` will raise an error if called with an API version of `2021-09-30-preview` and later. +- In `DocumentAnalysisClient` all prebuilt model methods along with custom model, layout, and a prebuilt document analysis model are unified into two methods called +`begin_analyze_document` and `begin_analyze_document_from_url`. 
+- In `FormRecognizerClient` there are two methods (a stream and URL method) for each of the prebuilt models supported by the service. This results in two methods for business card, receipt, identity document, and invoice models, along with a pair of methods for recognizing custom documents and for recognizing content/layout. + +Creating new clients in `3.1.x`: +```python +from azure.core.credentials import AzureKeyCredential +from azure.ai.formrecognizer import FormRecognizerClient, FormTrainingClient + +endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"] +key = os.environ["AZURE_FORM_RECOGNIZER_KEY"] + +form_recognizer_client = FormRecognizerClient( + endpoint=endpoint, credential=AzureKeyCredential(key) +) + +form_training_client = FormTrainingClient( + endpoint=endpoint, credential=AzureKeyCredential(key) +) +``` + +Creating new clients in `3.2.x`: +```python +from azure.core.credentials import AzureKeyCredential +from azure.ai.formrecognizer import DocumentAnalysisClient, DocumentModelAdministrationClient + +endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"] +key = os.environ["AZURE_FORM_RECOGNIZER_KEY"] + +document_analysis_client = DocumentAnalysisClient( + endpoint=endpoint, credential=AzureKeyCredential(key) +) + +document_model_admin_client = DocumentModelAdministrationClient( + endpoint=endpoint, credential=AzureKeyCredential(key) +) +``` + +### Analyzing documents + +Differences between the versions: +- `begin_analyze_document` and `begin_analyze_document_from_url` accept a string with the desired model ID for analysis. The model ID can be any of the prebuilt model IDs or a custom model ID. +- Along with more consolidated analysis methods in the `DocumentAnalysisClient`, the return types have also been improved and remove the hierarchical dependencies between elements. 
An instance of the `AnalyzeResult` model is now returned which showcases important document elements, such as key-value pairs, entities, tables, and document fields and values, among others, at the top level of the returned model. This can be contrasted with `RecognizedForm` which included more hierarchical relationships, for instance tables were an element of a `FormPage` and not a top-level element. +- In the new version of the library, the functionality of `begin_recognize_content` has been added as a prebuilt model and can be called in library version `azure-ai-formrecognizer (3.2.x)` with `begin_analyze_document` by passing in the `prebuilt-layout` model ID. Similarly, to get general prebuilt document information, such as key-value pairs, entities, and text layout, the `prebuilt-document` model ID can be used with `begin_analyze_document`. +- When calling `begin_analyze_document` and `begin_analyze_document_from_url` the returned type is an `AnalyzeResult` object, while the various methods used with `FormRecognizerClient` return a list of `RecognizedForm`. +- The `pages` keyword argument is a string with library version `azure-ai-formrecognizer (3.2.x)`. In `azure-ai-formrecognizer (3.1.x)`, `pages` was a list of strings. +- The `include_field_elements` keyword argument is not supported with the `DocumentAnalysisClient`, text details are automatically included with API version `2021-09-30-preview` and later. +- The `reading_order` keyword argument does not exist on `begin_analyze_document` and `begin_analyze_document_from_url`. The service uses `natural` reading order to return data. 
+ +Analyzing prebuilt models like business cards, identity documents, invoices, and receipts with `3.1.x`: +```python +with open(path_to_sample_forms, "rb") as f: + poller = form_recognizer_client.begin_recognize_receipts(receipt=f, locale="en-US") +receipts = poller.result() + +for idx, receipt in enumerate(receipts): + print("--------Recognizing receipt #{}--------".format(idx+1)) + receipt_type = receipt.fields.get("ReceiptType") + if receipt_type: + print("Receipt Type: {} has confidence: {}".format(receipt_type.value, receipt_type.confidence)) + merchant_name = receipt.fields.get("MerchantName") + if merchant_name: + print("Merchant Name: {} has confidence: {}".format(merchant_name.value, merchant_name.confidence)) + transaction_date = receipt.fields.get("TransactionDate") + if transaction_date: + print("Transaction Date: {} has confidence: {}".format(transaction_date.value, transaction_date.confidence)) + if receipt.fields.get("Items"): + print("Receipt items:") + for idx, item in enumerate(receipt.fields.get("Items").value): + print("...Item #{}".format(idx+1)) + item_name = item.value.get("Name") + if item_name: + print("......Item Name: {} has confidence: {}".format(item_name.value, item_name.confidence)) + item_quantity = item.value.get("Quantity") + if item_quantity: + print("......Item Quantity: {} has confidence: {}".format(item_quantity.value, item_quantity.confidence)) + item_price = item.value.get("Price") + if item_price: + print("......Individual Item Price: {} has confidence: {}".format(item_price.value, item_price.confidence)) + item_total_price = item.value.get("TotalPrice") + if item_total_price: + print("......Total Item Price: {} has confidence: {}".format(item_total_price.value, item_total_price.confidence)) + subtotal = receipt.fields.get("Subtotal") + if subtotal: + print("Subtotal: {} has confidence: {}".format(subtotal.value, subtotal.confidence)) + tax = receipt.fields.get("Tax") + if tax: + print("Tax: {} has confidence: 
{}".format(tax.value, tax.confidence)) + tip = receipt.fields.get("Tip") + if tip: + print("Tip: {} has confidence: {}".format(tip.value, tip.confidence)) + total = receipt.fields.get("Total") + if total: + print("Total: {} has confidence: {}".format(total.value, total.confidence)) + print("--------------------------------------") +``` + +Analyzing prebuilt models like business cards, identity documents, invoices, and receipts with `3.2.x`: +```python +with open(path_to_sample_documents, "rb") as f: + poller = document_analysis_client.begin_analyze_document( + "prebuilt-receipt", document=f, locale="en-US" + ) +receipts = poller.result() + +for idx, receipt in enumerate(receipts.documents): + print("--------Recognizing receipt #{}--------".format(idx + 1)) + receipt_type = receipt.fields.get("ReceiptType") + if receipt_type: + print( + "Receipt Type: {} has confidence: {}".format( + receipt_type.value, receipt_type.confidence + ) + ) + merchant_name = receipt.fields.get("MerchantName") + if merchant_name: + print( + "Merchant Name: {} has confidence: {}".format( + merchant_name.value, merchant_name.confidence + ) + ) + transaction_date = receipt.fields.get("TransactionDate") + if transaction_date: + print( + "Transaction Date: {} has confidence: {}".format( + transaction_date.value, transaction_date.confidence + ) + ) + if receipt.fields.get("Items"): + print("Receipt items:") + for idx, item in enumerate(receipt.fields.get("Items").value): + print("...Item #{}".format(idx + 1)) + item_name = item.value.get("Name") + if item_name: + print( + "......Item Name: {} has confidence: {}".format( + item_name.value, item_name.confidence + ) + ) + item_quantity = item.value.get("Quantity") + if item_quantity: + print( + "......Item Quantity: {} has confidence: {}".format( + item_quantity.value, item_quantity.confidence + ) + ) + item_price = item.value.get("Price") + if item_price: + print( + "......Individual Item Price: {} has confidence: {}".format( + item_price.value, 
item_price.confidence + ) + ) + item_total_price = item.value.get("TotalPrice") + if item_total_price: + print( + "......Total Item Price: {} has confidence: {}".format( + item_total_price.value, item_total_price.confidence + ) + ) + subtotal = receipt.fields.get("Subtotal") + if subtotal: + print( + "Subtotal: {} has confidence: {}".format( + subtotal.value, subtotal.confidence + ) + ) + tax = receipt.fields.get("Tax") + if tax: + print("Tax: {} has confidence: {}".format(tax.value, tax.confidence)) + tip = receipt.fields.get("Tip") + if tip: + print("Tip: {} has confidence: {}".format(tip.value, tip.confidence)) + total = receipt.fields.get("Total") + if total: + print("Total: {} has confidence: {}".format(total.value, total.confidence)) + print("--------------------------------------") +``` + +Analyzing document content with `3.1.x`: + +> NOTE: With version `3.1.x` of the library this method was called with a `language` keyword argument to hint at the language for the document, whereas in version `3.2.x` of the library `locale` is used for this purpose. 
+ +```python +with open(path_to_sample_forms, "rb") as f: + poller = form_recognizer_client.begin_recognize_content(form=f) +form_pages = poller.result() + +for idx, content in enumerate(form_pages): + print("----Recognizing content from page #{}----".format(idx+1)) + print("Page has width: {} and height: {}, measured with unit: {}".format( + content.width, + content.height, + content.unit + )) + for table_idx, table in enumerate(content.tables): + print("Table # {} has {} rows and {} columns".format(table_idx, table.row_count, table.column_count)) + print("Table # {} location on page: {}".format(table_idx, format_bounding_box(table.bounding_box))) + for cell in table.cells: + print("...Cell[{}][{}] has text '{}' within bounding box '{}'".format( + cell.row_index, + cell.column_index, + cell.text, + format_bounding_box(cell.bounding_box) + )) + + for line_idx, line in enumerate(content.lines): + print("Line # {} has word count '{}' and text '{}' within bounding box '{}'".format( + line_idx, + len(line.words), + line.text, + format_bounding_box(line.bounding_box) + )) + if line.appearance: + if line.appearance.style_name == "handwriting" and line.appearance.style_confidence > 0.8: + print("Text line '{}' is handwritten and might be a signature.".format(line.text)) + for word in line.words: + print("...Word '{}' has a confidence of {}".format(word.text, word.confidence)) + + for selection_mark in content.selection_marks: + print("Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format( + selection_mark.state, + format_bounding_box(selection_mark.bounding_box), + selection_mark.confidence + )) + print("----------------------------------------") +``` + + +Analyzing document layout with `3.2.x`: +```python +with open(path_to_sample_documents, "rb") as f: + poller = document_analysis_client.begin_analyze_document( + "prebuilt-layout", document=f + ) +result = poller.result() + +for idx, style in enumerate(result.styles): + print( + "Document 
contains {} content".format( + "handwritten" if style.is_handwritten else "no handwritten" + ) + ) + +for idx, page in enumerate(result.pages): + print("----Analyzing layout from page #{}----".format(idx + 1)) + print( + "Page has width: {} and height: {}, measured with unit: {}".format( + page.width, page.height, page.unit + ) + ) + + for line_idx, line in enumerate(page.lines): + print( + "Line # {} has text content '{}' within bounding box '{}'".format( + line_idx, + line.content, + format_bounding_box(line.bounding_box), + ) + ) + + for word in page.words: + print( + "...Word '{}' has a confidence of {}".format( + word.content, word.confidence + ) + ) + + for selection_mark in page.selection_marks: + print( + "Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format( + selection_mark.state, + format_bounding_box(selection_mark.bounding_box), + selection_mark.confidence, + ) + ) + +for table_idx, table in enumerate(result.tables): + print( + "Table # {} has {} rows and {} columns".format( + table_idx, table.row_count, table.column_count + ) + ) + for region in table.bounding_regions: + print( + "Table # {} location on page: {} is {}".format( + table_idx, + region.page_number, + format_bounding_box(region.bounding_box), + ) + ) + for cell in table.cells: + print( + "...Cell[{}][{}] has text '{}'".format( + cell.row_index, + cell.column_index, + cell.content, + ) + ) + for region in cell.bounding_regions: + print( + "...content on page {} is within bounding box '{}'".format( + region.page_number, + format_bounding_box(region.bounding_box), + ) + ) + +print("----------------------------------------") +``` + +Analyzing general prebuilt document types with `3.2.x`: + +> NOTE: Analyzing a document with the `prebuilt-document` model replaces training without labels in version `3.1.x` of the library. 
+ +```python +with open(path_to_sample_documents, "rb") as f: + poller = document_analysis_client.begin_analyze_document( + "prebuilt-document", document=f + ) +result = poller.result() + +for style in result.styles: + print( + "Document contains {} content".format( + "handwritten" if style.is_handwritten else "no handwritten" + ) + ) + +for page in result.pages: + print("----Analyzing document from page #{}----".format(page.page_number)) + print( + "Page has width: {} and height: {}, measured with unit: {}".format( + page.width, page.height, page.unit + ) + ) + + for line_idx, line in enumerate(page.lines): + print( + "...Line # {} has text content '{}' within bounding box '{}'".format( + line_idx, + line.content, + format_bounding_box(line.bounding_box), + ) + ) + + for word in page.words: + print( + "...Word '{}' has a confidence of {}".format( + word.content, word.confidence + ) + ) + + for selection_mark in page.selection_marks: + print( + "...Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format( + selection_mark.state, + format_bounding_box(selection_mark.bounding_box), + selection_mark.confidence, + ) + ) + +for table_idx, table in enumerate(result.tables): + print( + "Table # {} has {} rows and {} columns".format( + table_idx, table.row_count, table.column_count + ) + ) + for region in table.bounding_regions: + print( + "Table # {} location on page: {} is {}".format( + table_idx, + region.page_number, + format_bounding_box(region.bounding_box), + ) + ) + for cell in table.cells: + print( + "...Cell[{}][{}] has content '{}'".format( + cell.row_index, + cell.column_index, + cell.content, + ) + ) + for region in cell.bounding_regions: + print( + "...content on page {} is within bounding box '{}'\n".format( + region.page_number, + format_bounding_box(region.bounding_box), + ) + ) + +print("----Entities found in document----") +for entity in result.entities: + print("Entity of category '{}' with sub-category 
'{}'".format(entity.category, entity.sub_category)) + print("...has content '{}'".format(entity.content)) + print("...within '{}' bounding regions".format(format_bounding_region(entity.bounding_regions))) + print("...with confidence {}\n".format(entity.confidence)) + +print("----Key-value pairs found in document----") +for kv_pair in result.key_value_pairs: + if kv_pair.key: + print( + "Key '{}' found within '{}' bounding regions".format( + kv_pair.key.content, + format_bounding_region(kv_pair.key.bounding_regions), + ) + ) + if kv_pair.value: + print( + "Value '{}' found within '{}' bounding regions\n".format( + kv_pair.value.content, + format_bounding_region(kv_pair.value.bounding_regions), + ) + ) +print("----------------------------------------") +``` + +> NOTE: All of these samples also work with `begin_analyze_document_from_url` when providing a valid URL to the document. + +### Analyzing a document with a custom model + +Differences between the versions: +- Analyzing a custom model with `DocumentAnalysisClient` uses the general `begin_analyze_document` and `begin_analyze_document_from_url` methods. +- In order to analyze a custom model with `FormRecognizerClient` the `begin_recognize_custom_models` and its corresponding URL methods are used. +- The `include_field_elements` keyword argument is not supported with the `DocumentAnalysisClient`, text details are automatically included with API version `2021-09-30-preview` and later. 
+ +Analyze custom document with `3.1.x`: +```python +with open(path_to_sample_forms, "rb") as f: + poller = form_recognizer_client.begin_recognize_custom_forms( + model_id=model_id, form=f, include_field_elements=True + ) +forms = poller.result() + +for idx, form in enumerate(forms): + print("--------Recognizing Form #{}--------".format(idx+1)) + print("Form has type {}".format(form.form_type)) + print("Form has form type confidence {}".format(form.form_type_confidence)) + print("Form was analyzed with model with ID {}".format(form.model_id)) + for name, field in form.fields.items(): + # each field is of type FormField + # label_data is populated if you are using a model trained without labels, + # since the service needs to make predictions for labels if not explicitly given to it. + if field.label_data: + print("...Field '{}' has label '{}' with a confidence score of {}".format( + name, + field.label_data.text, + field.confidence + )) + + print("...Label '{}' has value '{}' with a confidence score of {}".format( + field.label_data.text if field.label_data else name, field.value, field.confidence + )) + + # iterate over tables, lines, and selection marks on each page + for page in form.pages: + for i, table in enumerate(page.tables): + print("\nTable {} on page {}".format(i+1, table.page_number)) + for cell in table.cells: + print("...Cell[{}][{}] has text '{}' with confidence {}".format( + cell.row_index, cell.column_index, cell.text, cell.confidence + )) + print("\nLines found on page {}".format(page.page_number)) + for line in page.lines: + print("...Line '{}' is made up of the following words: ".format(line.text)) + for word in line.words: + print("......Word '{}' has a confidence of {}".format( + word.text, + word.confidence + )) + if page.selection_marks: + print("\nSelection marks found on page {}".format(page.page_number)) + for selection_mark in page.selection_marks: + print("......Selection mark is '{}' and has a confidence of {}".format( + 
selection_mark.state, + selection_mark.confidence + )) + + print("-----------------------------------") +``` + +Analyze custom document with `3.2.x`: +```python +with open(path_to_sample_documents, "rb") as f: + poller = document_analysis_client.begin_analyze_document( + model=model_id, document=f + ) +result = poller.result() + +for idx, document in enumerate(result.documents): + print("--------Analyzing document #{}--------".format(idx + 1)) + print("Document has type {}".format(document.doc_type)) + print("Document has document type confidence {}".format(document.confidence)) + print("Document was analyzed with model with ID {}".format(result.model_id)) + for name, field in document.fields.items(): + field_value = field.value if field.value else field.content + print("......found field of type '{}' with value '{}' and with confidence {}".format(field.value_type, field_value, field.confidence)) + + +# iterate over tables, lines, and selection marks on each page +for page in result.pages: + print("\nLines found on page {}".format(page.page_number)) + for line in page.lines: + print("...Line '{}'".format(line.content)) + for word in page.words: + print( + "...Word '{}' has a confidence of {}".format( + word.content, word.confidence + ) + ) + if page.selection_marks: + print("\nSelection marks found on page {}".format(page.page_number)) + for selection_mark in page.selection_marks: + print( + "...Selection mark is '{}' and has a confidence of {}".format( + selection_mark.state, selection_mark.confidence + ) + ) + +for i, table in enumerate(result.tables): + print("\nTable {} can be found on page:".format(i + 1)) + for region in table.bounding_regions: + print("...{}".format(i + 1, region.page_number)) + for cell in table.cells: + print( + "...Cell[{}][{}] has text '{}'".format( + cell.row_index, cell.column_index, cell.content + ) + ) +print("-----------------------------------") +``` + +### Training a custom model + +Differences between the versions: +- Files for 
building a new model for version `3.2.x` can be created using the labeling tool found [here][fr_labeling_tool]. +- In version `3.1.x` the `use_training_labels` keyword argument was used to indicate whether to use labeled data when creating the custom model. +- In version `3.2.x` the `use_training_labels` keyword argument is not supported since training must be carried out with labeled training documents. Additionally train without labels is now replaced with the prebuilt model "prebuilt-document" which extracts entities, key-value pairs, and layout from a document. + +Train a custom model with `3.1.x`: +```python +form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key)) +poller = form_training_client.begin_training( + container_sas_url, use_training_labels=True, model_name="mymodel" +) +model = poller.result() + +# Custom model information +print("Model ID: {}".format(model.model_id)) +print("Status: {}".format(model.status)) +print("Model name: {}".format(model.model_name)) +print("Is this a composed model?: {}".format(model.properties.is_composed_model)) +print("Training started on: {}".format(model.training_started_on)) +print("Training completed on: {}".format(model.training_completed_on)) + +print("Recognized fields:") +# looping through the submodels, which contains the fields they were trained on +for submodel in model.submodels: + print("...The submodel has model ID: {}".format(submodel.model_id)) + print("...The submodel with form type {} has an average accuracy '{}'".format( + submodel.form_type, submodel.accuracy + )) + for name, field in submodel.fields.items(): + print("...The model found the field '{}' with an accuracy of {}".format( + name, field.accuracy + )) + +# Training result information +for doc in model.training_documents: + print("Document name: {}".format(doc.name)) + print("Document status: {}".format(doc.status)) + print("Document page count: {}".format(doc.page_count)) + print("Document errors: {}".format(doc.errors)) 
+``` + +Train a custom model with `3.2.x`: +```python +document_model_admin_client = DocumentModelAdministrationClient(endpoint, AzureKeyCredential(key)) +poller = document_model_admin_client.begin_build_model( + container_sas_url, model_id="my-model-id", description="my model description" +) +model = poller.result() + +print("Model ID: {}".format(model.model_id)) +print("Description: {}".format(model.description)) +print("Model created on: {}\n".format(model.created_on)) +print("Doc types the model can recognize:") +for name, doc_type in model.doc_types.items(): + print("\nDoc Type: '{}' which has the following fields:".format(name)) + for field_name, confidence in doc_type.field_confidence.items(): + print("Field: '{}' has confidence score {}".format(field_name, confidence)) +``` + +### Manage models + +Differences between the versions: +- When using API version `2021-09-30-preview` and later models no longer include submodels, instead a model can analyze different document types. +- When building, composing, or copying models users can now assign their own model IDs and specify a description. +- In version `3.2.x` of the library, only models that build successfully can be retrieved from the get and list model calls. Unsuccessful model operations can be viewed with the get and list operation methods (note that document model operation data persists for only 24 hours). In version `3.1.x` of the library, models that had not succeeded were still created, had to be deleted by the user, and were returned in the list models response. + +## Additional samples + +For additional samples please take a look at the [Form Recognizer Samples][samples_readme] for more guidance. 
+ +[readme]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/README.md +[samples_readme]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md +[fr_labeling_tool]: https://aka.ms/azsdk/formrecognizer/labelingtool \ No newline at end of file From 01d0b99e53a1995b39e59649bf3d550dc5408875 Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 1 Oct 2021 13:20:37 -0700 Subject: [PATCH 10/10] Update Multi-Tenant support to incorporate Arch Board Feedback (#20940) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update Multi-Tenant support to incorporate Arch Board Feedback * update * update * updates * update * update * updates * update * update * update * Update sdk/identity/azure-identity/CHANGELOG.md Co-authored-by: McCoy Patiño <39780829+mccoyp@users.noreply.github.com> Co-authored-by: McCoy Patiño <39780829+mccoyp@users.noreply.github.com> --- sdk/identity/azure-identity/CHANGELOG.md | 12 +- .../azure-identity/azure/identity/__init__.py | 2 - .../azure/identity/_constants.py | 2 +- .../identity/_credentials/application.py | 4 - .../_credentials/authorization_code.py | 6 +- .../azure/identity/_credentials/azure_cli.py | 14 +- .../identity/_credentials/azure_powershell.py | 15 +- .../azure/identity/_credentials/browser.py | 3 - .../identity/_credentials/certificate.py | 6 - .../identity/_credentials/client_secret.py | 6 - .../azure/identity/_credentials/default.py | 10 +- .../identity/_credentials/device_code.py | 3 - .../identity/_credentials/environment.py | 7 +- .../identity/_credentials/on_behalf_of.py | 3 - .../identity/_credentials/shared_cache.py | 4 - .../azure/identity/_credentials/silent.py | 3 +- .../identity/_credentials/user_password.py | 3 - .../azure/identity/_credentials/vscode.py | 3 - .../azure/identity/_internal/__init__.py | 15 +- .../identity/_internal/aad_client_base.py | 9 +- 
.../identity/_internal/get_token_mixin.py | 3 +- .../azure/identity/_internal/interactive.py | 3 +- .../identity/_internal/msal_credentials.py | 8 +- .../identity/aio/_credentials/application.py | 4 - .../aio/_credentials/authorization_code.py | 6 +- .../identity/aio/_credentials/azure_cli.py | 12 +- .../aio/_credentials/azure_powershell.py | 12 +- .../identity/aio/_credentials/certificate.py | 3 - .../aio/_credentials/client_secret.py | 3 - .../identity/aio/_credentials/default.py | 10 +- .../identity/aio/_credentials/environment.py | 7 +- .../identity/aio/_credentials/on_behalf_of.py | 5 +- .../identity/aio/_credentials/shared_cache.py | 6 +- .../azure/identity/aio/_credentials/vscode.py | 6 +- .../identity/aio/_internal/get_token_mixin.py | 3 +- .../azure-identity/tests/test_aad_client.py | 6 +- .../tests/test_aad_client_async.py | 6 +- .../azure-identity/tests/test_auth_code.py | 19 +-- .../tests/test_auth_code_async.py | 19 +-- .../tests/test_certificate_credential.py | 33 +--- .../test_certificate_credential_async.py | 19 +-- .../tests/test_cli_credential.py | 20 +-- .../tests/test_cli_credential_async.py | 20 +-- .../tests/test_client_secret_credential.py | 33 +--- .../test_client_secret_credential_async.py | 20 +-- .../azure-identity/tests/test_default.py | 30 ---- .../tests/test_default_async.py | 27 --- .../tests/test_interactive_credential.py | 18 +- sdk/identity/azure-identity/tests/test_obo.py | 6 +- .../azure-identity/tests/test_obo_async.py | 6 +- .../tests/test_powershell_credential.py | 20 +-- .../tests/test_powershell_credential_async.py | 20 +-- .../tests/test_shared_cache_credential.py | 154 +++++------------- .../test_shared_cache_credential_async.py | 19 +-- .../tests/test_vscode_credential.py | 20 +-- .../tests/test_vscode_credential_async.py | 20 +-- 56 files changed, 157 insertions(+), 599 deletions(-) diff --git a/sdk/identity/azure-identity/CHANGELOG.md b/sdk/identity/azure-identity/CHANGELOG.md index 49c726f3f0a2..64847d7e5a20 100644 --- 
a/sdk/identity/azure-identity/CHANGELOG.md +++ b/sdk/identity/azure-identity/CHANGELOG.md @@ -2,13 +2,15 @@ ## 1.7.0b5 (Unreleased) -### Features Added - ### Breaking Changes +> These changes do not impact the API of stable versions such as 1.6.0. +> Only code written against a beta version such as 1.7.0b1 may be affected. -### Bugs Fixed - -### Other Changes +- The `allow_multitenant_authentication` argument has been removed and the default behavior is now as if it were true. + The multitenant authentication feature can be totally disabled by setting the environment variable + `AZURE_IDENTITY_DISABLE_MULTITENANTAUTH` to `True`. +- `azure.identity.RegionalAuthority` is removed. +- `regional_authority` argument is removed for `CertificateCredential` and `ClientSecretCredential` ## 1.7.0b4 (2021-09-09) diff --git a/sdk/identity/azure-identity/azure/identity/__init__.py b/sdk/identity/azure-identity/azure/identity/__init__.py index 4d7dff365695..0969ad2e0504 100644 --- a/sdk/identity/azure-identity/azure/identity/__init__.py +++ b/sdk/identity/azure-identity/azure/identity/__init__.py @@ -5,7 +5,6 @@ """Credentials for Azure SDK clients.""" from ._auth_record import AuthenticationRecord -from ._enums import RegionalAuthority from ._exceptions import AuthenticationRequiredError, CredentialUnavailableError from ._constants import AzureAuthorityHosts, KnownAuthorities from ._credentials import ( @@ -47,7 +46,6 @@ "InteractiveBrowserCredential", "KnownAuthorities", "OnBehalfOfCredential", - "RegionalAuthority", "ManagedIdentityCredential", "SharedTokenCacheCredential", "TokenCachePersistenceOptions", diff --git a/sdk/identity/azure-identity/azure/identity/_constants.py b/sdk/identity/azure-identity/azure/identity/_constants.py index 878d7f6bce7f..4cf9fb2d9287 100644 --- a/sdk/identity/azure-identity/azure/identity/_constants.py +++ b/sdk/identity/azure-identity/azure/identity/_constants.py @@ -44,7 +44,7 @@ class EnvironmentVariables: MSI_SECRET = "MSI_SECRET" 
AZURE_AUTHORITY_HOST = "AZURE_AUTHORITY_HOST" - AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION = "AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION" + AZURE_IDENTITY_DISABLE_MULTITENANTAUTH = "AZURE_IDENTITY_DISABLE_MULTITENANTAUTH" AZURE_REGIONAL_AUTHORITY_NAME = "AZURE_REGIONAL_AUTHORITY_NAME" AZURE_FEDERATED_TOKEN_FILE = "AZURE_FEDERATED_TOKEN_FILE" diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/application.py b/sdk/identity/azure-identity/azure/identity/_credentials/application.py index 46a84e13eb7b..abb22fbaabac 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/application.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/application.py @@ -48,10 +48,6 @@ class AzureApplicationCredential(ChainedTokenCredential): `_ for an overview of managed identities. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application or user is registered in. When False, which is the default, the credential will acquire tokens - only from the tenant specified by **AZURE_TENANT_ID**. This argument doesn't apply to managed identity - authentication. :keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com", the authority for Azure Public Cloud, which is the default when no value is given for this keyword argument or environment variable AZURE_AUTHORITY_HOST. 
:class:`~azure.identity.AzureAuthorityHosts` defines authorities for diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py b/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py index 587547640744..7eae087b837c 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py @@ -30,9 +30,6 @@ class AuthorizationCodeCredential(GetTokenMixin): the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds. :keyword str client_secret: One of the application's client secrets. Required only for web apps and web APIs. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or the tenant specified by **tenant_id**. """ def __init__(self, tenant_id, client_id, authorization_code, redirect_uri, **kwargs): @@ -67,8 +64,7 @@ def get_token(self, *scopes, **kwargs): redeeming the authorization code. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` :raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. 
The error's ``message`` diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py b/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py index d535a286adb7..a94e001852ec 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py @@ -35,15 +35,8 @@ class AzureCliCredential(object): """Authenticates by requesting a token from the Azure CLI. This requires previously logging in to Azure via "az login", and will use the CLI's currently logged in identity. - - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the identity logged in to the Azure CLI is registered in. When False, which is the default, the credential will - acquire tokens only from the tenant of the Azure CLI's active subscription. """ - def __init__(self, **kwargs): - self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False) - def __enter__(self): return self @@ -55,7 +48,7 @@ def close(self): """Calling this method is unnecessary.""" @log_get_token("AzureCliCredential") - def get_token(self, *scopes, **kwargs): + def get_token(self, *scopes, **kwargs): # pylint: disable=no-self-use # type: (*str, **Any) -> AccessToken """Request an access token for `scopes`. @@ -63,8 +56,7 @@ def get_token(self, *scopes, **kwargs): also handle token caching because this credential doesn't cache the tokens it acquires. :param str scopes: desired scope for the access token. This credential allows only one scope per request. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. 
:rtype: :class:`azure.core.credentials.AccessToken` @@ -75,7 +67,7 @@ def get_token(self, *scopes, **kwargs): resource = _scopes_to_resource(*scopes) command = COMMAND_LINE.format(resource) - tenant = resolve_tenant("", self._allow_multitenant, **kwargs) + tenant = resolve_tenant("", **kwargs) if tenant: command += " --tenant " + tenant output = _run_command(command) diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py b/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py index 17869fbde253..78c34b81d76e 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py @@ -51,16 +51,8 @@ class AzurePowerShellCredential(object): """Authenticates by requesting a token from Azure PowerShell. This requires previously logging in to Azure via "Connect-AzAccount", and will use the currently logged in identity. - - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the identity logged in to Azure PowerShell is registered in. When False, which is the default, the credential - will acquire tokens only from the tenant of Azure PowerShell's active subscription. """ - def __init__(self, **kwargs): - # type: (**Any) -> None - self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False) - def __enter__(self): return self @@ -72,7 +64,7 @@ def close(self): """Calling this method is unnecessary.""" @log_get_token("AzurePowerShellCredential") - def get_token(self, *scopes, **kwargs): + def get_token(self, *scopes, **kwargs): # pylint: disable=no-self-use # type: (*str, **Any) -> AccessToken """Request an access token for `scopes`. @@ -80,8 +72,7 @@ def get_token(self, *scopes, **kwargs): also handle token caching because this credential doesn't cache the tokens it acquires. :param str scopes: desired scope for the access token. 
This credential allows only one scope per request. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` @@ -90,7 +81,7 @@ def get_token(self, *scopes, **kwargs): :raises ~azure.core.exceptions.ClientAuthenticationError: the credential invoked Azure PowerShell but didn't receive an access token """ - tenant_id = resolve_tenant("", self._allow_multitenant, **kwargs) + tenant_id = resolve_tenant("", **kwargs) command_line = get_command_line(scopes, tenant_id) output = run_command_line(command_line) token = parse_token(output) diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/browser.py b/sdk/identity/azure-identity/azure/identity/_credentials/browser.py index 6aead5b26f47..5b624046a7ac 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/browser.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/browser.py @@ -51,9 +51,6 @@ class InteractiveBrowserCredential(InteractiveCredential): will cache tokens in memory. :paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions :keyword int timeout: seconds to wait for the user to complete authentication. Defaults to 300 (5 minutes). - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or the tenant specified by **tenant_id**. 
:raises ValueError: invalid **redirect_uri** """ diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py b/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py index cdb999d41898..1169fb178942 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py @@ -39,18 +39,12 @@ class CertificateCredential(ClientCredentialBase): :keyword password: The certificate's password. If a unicode string, it will be encoded as UTF-8. If the certificate requires a different encoding, pass appropriately encoded bytes instead. :paramtype password: str or bytes - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only from - the tenant specified by **tenant_id**. :keyword bool send_certificate_chain: if True, the credential will send the public certificate chain in the x5c header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. Defaults to False. :keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential will cache tokens in memory. :paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword ~azure.identity.RegionalAuthority regional_authority: a :class:`~azure.identity.RegionalAuthority` to - which the credential will authenticate. This argument should be used only by applications deployed to Azure - VMs. 
""" def __init__(self, tenant_id, client_id, certificate_path=None, **kwargs): diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py b/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py index 9623b0ef8b1d..4b68e401a023 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py @@ -21,15 +21,9 @@ class ClientSecretCredential(ClientCredentialBase): :keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com", the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only from - the tenant specified by **tenant_id**. :keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential will cache tokens in memory. :paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword ~azure.identity.RegionalAuthority regional_authority: a :class:`~azure.identity.RegionalAuthority` to - which the credential will authenticate. This argument should be used only by applications deployed to Azure - VMs. 
""" def __init__(self, tenant_id, client_id, client_secret, **kwargs): diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/default.py b/sdk/identity/azure-identity/azure/identity/_credentials/default.py index 90ddc1b39b53..75c2111362bb 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/default.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/default.py @@ -47,9 +47,6 @@ class DefaultAzureCredential(ChainedTokenCredential): This default behavior is configurable with keyword arguments. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only from - its configured tenant. This argument doesn't apply to managed identity authentication. :keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com', the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds. Managed identities ignore this because they reside in a single cloud. @@ -136,9 +133,9 @@ def __init__(self, **kwargs): if not exclude_visual_studio_code_credential: credentials.append(VisualStudioCodeCredential(**vscode_args)) if not exclude_cli_credential: - credentials.append(AzureCliCredential(**kwargs)) + credentials.append(AzureCliCredential()) if not exclude_powershell_credential: - credentials.append(AzurePowerShellCredential(**kwargs)) + credentials.append(AzurePowerShellCredential()) if not exclude_interactive_browser_credential: if interactive_browser_client_id: credentials.append( @@ -158,8 +155,7 @@ def get_token(self, *scopes, **kwargs): This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. 
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py b/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py index 657bd5eb5568..e5af7b89c8d1 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py @@ -55,9 +55,6 @@ class DeviceCodeCredential(InteractiveCredential): :keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential will cache tokens in memory. :paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or the tenant specified by **tenant_id**. """ def __init__(self, client_id=DEVELOPER_SIGN_ON_CLIENT_ID, **kwargs): diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/environment.py b/sdk/identity/azure-identity/azure/identity/_credentials/environment.py index 8d0e7401d8b2..d2b134d11e9e 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/environment.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/environment.py @@ -52,10 +52,6 @@ class EnvironmentCredential(object): - **AZURE_TENANT_ID**: (optional) ID of the service principal's tenant. Also called its 'directory' ID. If not provided, defaults to the 'organizations' tenant, which supports only Azure Active Directory work or school accounts. 
- - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application or user is registered in. When False, which is the default, the credential will acquire tokens - only from the tenant specified by **AZURE_TENANT_ID**. """ def __init__(self, **kwargs): @@ -123,8 +119,7 @@ def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py b/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py index bc39c7a475e1..70f3c407ca16 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py @@ -39,9 +39,6 @@ class OnBehalfOfCredential(MsalCredential, GetTokenMixin): :param str user_assertion: the access token the credential will use as the user assertion when requesting on-behalf-of tokens - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only - from the tenant specified by **tenant_id**. :keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com", the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds. 
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py b/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py index 906aaad174fc..3cef36e80507 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py @@ -34,10 +34,6 @@ class SharedTokenCacheCredential(object): :keyword cache_persistence_options: configuration for persistent token caching. If not provided, the credential will use the persistent cache shared by Microsoft development applications :paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or, if a value was given for **authentication_record**, the tenant specified by the - :class:`AuthenticationRecord`. 
""" def __init__(self, username=None, **kwargs): diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/silent.py b/sdk/identity/azure-identity/azure/identity/_credentials/silent.py index b1aa1ade8c46..08e0611fa5e4 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/silent.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/silent.py @@ -35,7 +35,6 @@ def __init__(self, authentication_record, **kwargs): # authenticate in the tenant that produced the record unless "tenant_id" specifies another self._tenant_id = kwargs.pop("tenant_id", None) or self._auth_record.tenant_id validate_tenant_id(self._tenant_id) - self._allow_multitenant = kwargs.pop("allow_multitenant_authentication", False) self._cache = kwargs.pop("_cache", None) self._client_applications = {} # type: Dict[str, PublicClientApplication] self._client = MsalClient(**kwargs) @@ -74,7 +73,7 @@ def _initialize(self): self._initialized = True def _get_client_application(self, **kwargs): - tenant_id = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs) + tenant_id = resolve_tenant(self._tenant_id, **kwargs) if tenant_id not in self._client_applications: # CP1 = can handle claims challenges (CAE) capabilities = None if "AZURE_IDENTITY_DISABLE_CP1" in os.environ else ["CP1"] diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py b/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py index 77281a185e6e..0521e8fa42d6 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py @@ -37,9 +37,6 @@ class UsernamePasswordCredential(InteractiveCredential): :keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential will cache tokens in memory. 
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or the tenant specified by **tenant_id**. """ def __init__(self, client_id, username, password, **kwargs): diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py b/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py index cd6866f319da..a66dc9d234a9 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py @@ -120,9 +120,6 @@ class VisualStudioCodeCredential(_VSCodeCredentialBase, GetTokenMixin): :keyword str tenant_id: ID of the tenant the credential should authenticate in. Defaults to the "Azure: Tenant" setting in VS Code's user settings or, when that setting has no value, the "organizations" tenant, which supports only Azure Active Directory work or school accounts. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or the tenant configured by **tenant_id** or VS Code's user settings. 
""" def __enter__(self): diff --git a/sdk/identity/azure-identity/azure/identity/_internal/__init__.py b/sdk/identity/azure-identity/azure/identity/_internal/__init__.py index a1799b0679a8..d9c7203734a5 100644 --- a/sdk/identity/azure-identity/azure/identity/_internal/__init__.py +++ b/sdk/identity/azure-identity/azure/identity/_internal/__init__.py @@ -7,8 +7,6 @@ from six.moves.urllib_parse import urlparse -from azure.core.exceptions import ClientAuthenticationError - from .._constants import EnvironmentVariables, KnownAuthorities if TYPE_CHECKING: @@ -66,21 +64,16 @@ def validate_tenant_id(tenant_id): ) -def resolve_tenant(default_tenant, allow_multitenant, tenant_id=None, **_): - # type: (str, bool, Optional[str], **Any) -> str +def resolve_tenant(default_tenant, tenant_id=None, **_): + # type: (str, Optional[str], **Any) -> str """Returns the correct tenant for a token request given a credential's configuration""" if ( tenant_id is None - or tenant_id == default_tenant - or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION) + or default_tenant == "adfs" + or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH) ): return default_tenant - if not allow_multitenant: - raise ClientAuthenticationError( - 'The specified tenant for this token request, "{}", does not match'.format(tenant_id) - + ' the configured tenant, and "allow_multitenant_authentication" is False.' 
- ) return tenant_id diff --git a/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py b/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py index b6a44e8d5681..f33cf40ce227 100644 --- a/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py +++ b/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py @@ -47,13 +47,12 @@ class AadClientBase(ABC): _POST = ["POST"] def __init__( - self, tenant_id, client_id, authority=None, cache=None, allow_multitenant_authentication=False, **kwargs + self, tenant_id, client_id, authority=None, cache=None, **kwargs ): - # type: (str, str, Optional[str], Optional[TokenCache], bool, **Any) -> None + # type: (str, str, Optional[str], Optional[TokenCache], **Any) -> None self._authority = normalize_authority(authority) if authority else get_default_authority() self._tenant_id = tenant_id - self._allow_multitenant = allow_multitenant_authentication self._cache = cache or TokenCache() self._client_id = client_id @@ -61,7 +60,7 @@ def __init__( def get_cached_access_token(self, scopes, **kwargs): # type: (Iterable[str], **Any) -> Optional[AccessToken] - tenant = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs) + tenant = resolve_tenant(self._tenant_id, **kwargs) tokens = self._cache.find( TokenCache.CredentialType.ACCESS_TOKEN, target=list(scopes), @@ -260,7 +259,7 @@ def _get_refresh_token_request(self, scopes, refresh_token, **kwargs): def _get_token_url(self, **kwargs): # type: (**Any) -> str - tenant = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs) + tenant = resolve_tenant(self._tenant_id, **kwargs) return "/".join((self._authority, tenant, "oauth2/v2.0/token")) def _post(self, data, **kwargs): diff --git a/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py b/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py index d8b683e984bd..29f30bb1fb8a 100644 --- 
a/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py +++ b/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py @@ -57,8 +57,7 @@ def get_token(self, *scopes, **kwargs): This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/_internal/interactive.py b/sdk/identity/azure-identity/azure/identity/_internal/interactive.py index a095c783c595..e448e6d90a9c 100644 --- a/sdk/identity/azure-identity/azure/identity/_internal/interactive.py +++ b/sdk/identity/azure-identity/azure/identity/_internal/interactive.py @@ -109,8 +109,7 @@ def get_token(self, *scopes, **kwargs): :param str scopes: desired scopes for the access token. This method requires at least one scope. :keyword str claims: additional claims required in the token, such as those returned in a resource provider's claims challenge following an authorization failure - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. 
:rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py b/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py index 8ac10bbd687d..4ab266ae26c3 100644 --- a/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py +++ b/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py @@ -28,13 +28,9 @@ def __init__(self, client_id, client_credential=None, **kwargs): # type: (str, Optional[Union[str, Dict]], **Any) -> None authority = kwargs.pop("authority", None) self._authority = normalize_authority(authority) if authority else get_default_authority() - self._regional_authority = kwargs.pop( - "regional_authority", os.environ.get(EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME) - ) + self._regional_authority = os.environ.get(EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME) self._tenant_id = kwargs.pop("tenant_id", None) or "organizations" validate_tenant_id(self._tenant_id) - self._allow_multitenant = kwargs.pop("allow_multitenant_authentication", False) - self._client = MsalClient(**kwargs) self._client_applications = {} # type: Dict[str, msal.ClientApplication] self._client_credential = client_credential @@ -63,7 +59,7 @@ def close(self): def _get_app(self, **kwargs): # type: (**Any) -> msal.ClientApplication - tenant_id = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs) + tenant_id = resolve_tenant(self._tenant_id, **kwargs) if tenant_id not in self._client_applications: # CP1 = can handle claims challenges (CAE) capabilities = None if "AZURE_IDENTITY_DISABLE_CP1" in os.environ else ["CP1"] diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py index 7e63bc9b78db..c7812ea69587 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py +++ 
b/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py @@ -48,10 +48,6 @@ class AzureApplicationCredential(ChainedTokenCredential): `_ for an overview of managed identities. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application or user is registered in. When False, which is the default, the credential will acquire tokens - only from the tenant specified by **AZURE_TENANT_ID**. This argument doesn't apply to managed identity - authentication. :keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com", the authority for Azure Public Cloud, which is the default when no value is given for this keyword argument or environment variable AZURE_AUTHORITY_HOST. :class:`~azure.identity.AzureAuthorityHosts` defines authorities for diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py index 225fbe434d94..4befed6e9eeb 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py @@ -30,9 +30,6 @@ class AuthorizationCodeCredential(AsyncContextManager, GetTokenMixin): the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds. :keyword str client_secret: One of the application's client secrets. Required only for web apps and web APIs. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or the tenant specified by **tenant_id**. 
""" async def __aenter__(self): @@ -66,8 +63,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": redeeming the authorization code. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py index 944a2211d023..869cf5de69ae 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py @@ -31,15 +31,8 @@ class AzureCliCredential(AsyncContextManager): """Authenticates by requesting a token from the Azure CLI. This requires previously logging in to Azure via "az login", and will use the CLI's currently logged in identity. - - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the identity logged in to the Azure CLI is registered in. When False, which is the default, the credential will - acquire tokens only from the tenant of the Azure CLI's active subscription. """ - def __init__(self, **kwargs: "Any") -> None: - self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False) - @log_get_token_async async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": """Request an access token for `scopes`. @@ -48,8 +41,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": also handle token caching because this credential doesn't cache the tokens it acquires. :param str scopes: desired scope for the access token. 
This credential allows only one scope per request. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` @@ -63,7 +55,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": resource = _scopes_to_resource(*scopes) command = COMMAND_LINE.format(resource) - tenant = resolve_tenant("", self._allow_multitenant, **kwargs) + tenant = resolve_tenant("", **kwargs) if tenant: command += " --tenant " + tenant output = await _run_command(command) diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py index cfb3cd4331a1..0881dd4e2b99 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py @@ -28,15 +28,8 @@ class AzurePowerShellCredential(AsyncContextManager): """Authenticates by requesting a token from Azure PowerShell. This requires previously logging in to Azure via "Connect-AzAccount", and will use the currently logged in identity. - - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the identity logged in to Azure PowerShell is registered in. When False, which is the default, the credential - will acquire tokens only from the tenant of Azure PowerShell's active subscription. 
""" - def __init__(self, **kwargs: "Any") -> None: - self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False) - @log_get_token_async async def get_token( self, *scopes: str, **kwargs: "Any" @@ -47,8 +40,7 @@ async def get_token( also handle token caching because this credential doesn't cache the tokens it acquires. :param str scopes: desired scope for the access token. This credential allows only one scope per request. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` @@ -61,7 +53,7 @@ async def get_token( if sys.platform.startswith("win") and not isinstance(asyncio.get_event_loop(), asyncio.ProactorEventLoop): return _SyncCredential().get_token(*scopes, **kwargs) - tenant_id = resolve_tenant("", self._allow_multitenant, **kwargs) + tenant_id = resolve_tenant("", **kwargs) command_line = get_command_line(scopes, tenant_id) output = await run_command_line(command_line) token = parse_token(output) diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py index a78b9b790eac..0957e6a151c6 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py @@ -40,9 +40,6 @@ class CertificateCredential(AsyncContextManager, GetTokenMixin): :keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential will cache tokens in memory. 
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only from - the tenant specified by **tenant_id**. """ def __init__(self, tenant_id, client_id, certificate_path=None, **kwargs): diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py index 676e0b15e790..4bcfa49cbd19 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py @@ -29,9 +29,6 @@ class ClientSecretCredential(AsyncContextManager, GetTokenMixin): :keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential will cache tokens in memory. :paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only from - the tenant specified by **tenant_id**. """ def __init__(self, tenant_id: str, client_id: str, client_secret: str, **kwargs: "Any") -> None: diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py index 8888e5d28874..5b32b0429c54 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py @@ -42,9 +42,6 @@ class DefaultAzureCredential(ChainedTokenCredential): This default behavior is configurable with keyword arguments. 
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only from - its configured tenant. This argument doesn't apply to managed identity authentication. :keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com', the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds. Managed identities ignore this because they reside in a single cloud. @@ -121,9 +118,9 @@ def __init__(self, **kwargs: "Any") -> None: if not exclude_visual_studio_code_credential: credentials.append(VisualStudioCodeCredential(**vscode_args)) if not exclude_cli_credential: - credentials.append(AzureCliCredential(**kwargs)) + credentials.append(AzureCliCredential()) if not exclude_powershell_credential: - credentials.append(AzurePowerShellCredential(**kwargs)) + credentials.append(AzurePowerShellCredential()) super().__init__(*credentials) @@ -133,8 +130,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. 
:rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py index d4c0bdff2047..944add051c94 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py @@ -37,10 +37,6 @@ class EnvironmentCredential(AsyncContextManager): - **AZURE_CLIENT_ID**: the service principal's client ID - **AZURE_CLIENT_CERTIFICATE_PATH**: path to a PEM or PKCS12 certificate file including the private key. The certificate must not be password-protected. - - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application or user is registered in. When False, which is the default, the credential will acquire tokens - only from the tenant specified by **AZURE_TENANT_ID**. """ def __init__(self, **kwargs: "Any") -> None: @@ -91,8 +87,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. 
:rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py index bc7c75f2123a..202de061d3ae 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py @@ -36,9 +36,6 @@ class OnBehalfOfCredential(AsyncContextManager, GetTokenMixin): :param str user_assertion: the access token the credential will use as the user assertion when requesting on-behalf-of tokens - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the application is registered in. When False, which is the default, the credential will acquire tokens only - from the tenant specified by **tenant_id**. :keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com", the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds. 
@@ -74,7 +71,7 @@ def __init__( else: self._client_credential = client_credential - # note AadClient handles "allow_multitenant_authentication", "authority", and any pipeline kwargs + # note AadClient handles "authority" and any pipeline kwargs self._client = AadClient(tenant_id, client_id, **kwargs) self._assertion = user_assertion diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py index b663f16623af..1852b8523586 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py @@ -32,9 +32,6 @@ class SharedTokenCacheCredential(SharedTokenCacheBase, AsyncContextManager): :keyword cache_persistence_options: configuration for persistent token caching. If not provided, the credential will use the persistent cache shared by Microsoft development applications :paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant. """ async def __aenter__(self): @@ -57,8 +54,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": # py This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. 
:rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py index 586354f8ff30..c66e1e1c2611 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py @@ -26,9 +26,6 @@ class VisualStudioCodeCredential(_VSCodeCredentialBase, AsyncContextManager, Get :keyword str tenant_id: ID of the tenant the credential should authenticate in. Defaults to the "Azure: Tenant" setting in VS Code's user settings or, when that setting has no value, the "organizations" tenant, which supports only Azure Active Directory work or school accounts. - :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant - the user is registered in. When False, which is the default, the credential will acquire tokens only from the - user's home tenant or the tenant configured by **tenant_id** or VS Code's user settings. """ async def __aenter__(self) -> "VisualStudioCodeCredential": @@ -48,8 +45,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. 
:rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py b/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py index 17b8d225b55d..f41db52d4132 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py @@ -47,8 +47,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": This method is called automatically by Azure SDK clients. :param str scopes: desired scopes for the access token. This method requires at least one scope. - :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication** - is False, specifying a tenant with this argument may raise an exception. + :keyword str tenant_id: optional tenant to include in the token request. :rtype: :class:`azure.core.credentials.AccessToken` diff --git a/sdk/identity/azure-identity/tests/test_aad_client.py b/sdk/identity/azure-identity/tests/test_aad_client.py index 68a452c1e287..3bee135d4268 100644 --- a/sdk/identity/azure-identity/tests/test_aad_client.py +++ b/sdk/identity/azure-identity/tests/test_aad_client.py @@ -304,11 +304,7 @@ def test_multitenant_cache(): assert client_b.get_cached_access_token([scope]) is None # but C allows multitenant auth and should therefore return the token from tenant_a when appropriate - client_c = AadClient(tenant_id=tenant_c, allow_multitenant_authentication=True, **common_args) + client_c = AadClient(tenant_id=tenant_c, **common_args) assert client_c.get_cached_access_token([scope]) is None token = client_c.get_cached_access_token([scope], tenant_id=tenant_a) assert token.token == expected_token - with patch.dict( - "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True - ): - assert client_c.get_cached_access_token([scope], tenant_id=tenant_a) is 
None diff --git a/sdk/identity/azure-identity/tests/test_aad_client_async.py b/sdk/identity/azure-identity/tests/test_aad_client_async.py index dba17bc11cf6..ab30393f93d3 100644 --- a/sdk/identity/azure-identity/tests/test_aad_client_async.py +++ b/sdk/identity/azure-identity/tests/test_aad_client_async.py @@ -308,11 +308,7 @@ async def test_multitenant_cache(): assert client_b.get_cached_access_token([scope]) is None # but C allows multitenant auth and should therefore return the token from tenant_a when appropriate - client_c = AadClient(tenant_id=tenant_c, allow_multitenant_authentication=True, **common_args) + client_c = AadClient(tenant_id=tenant_c, **common_args) assert client_c.get_cached_access_token([scope]) is None token = client_c.get_cached_access_token([scope], tenant_id=tenant_a) assert token.token == expected_token - with patch.dict( - "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True - ): - assert client_c.get_cached_access_token([scope], tenant_id=tenant_a) is None diff --git a/sdk/identity/azure-identity/tests/test_auth_code.py b/sdk/identity/azure-identity/tests/test_auth_code.py index 29ab3733a633..f2fe752ff528 100644 --- a/sdk/identity/azure-identity/tests/test_auth_code.py +++ b/sdk/identity/azure-identity/tests/test_auth_code.py @@ -118,9 +118,7 @@ def test_auth_code_credential(): assert transport.send.call_count == 2 -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -138,7 +136,6 @@ def send(request, **_): "client-id", "authcode", "https://localhost", - allow_multitenant_authentication=True, transport=Mock(send=send), ) token = credential.get_token("scope") @@ -154,10 +151,7 @@ def send(request, **_): token = credential.get_token("scope") assert token.token == 
first_token - def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -174,15 +168,12 @@ def send(request, **_): token = credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="un" + expected_tenant) + token = credential.get_token("scope", tenant_id="un" + expected_tenant) + assert token.token == expected_token * 2 - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_auth_code_async.py b/sdk/identity/azure-identity/tests/test_auth_code_async.py index 5eb55e6515cc..e5a91ca62750 100644 --- a/sdk/identity/azure-identity/tests/test_auth_code_async.py +++ b/sdk/identity/azure-identity/tests/test_auth_code_async.py @@ -142,9 +142,7 @@ async def test_auth_code_credential(): assert transport.send.call_count == 2 -async def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = 
"second-tenant" @@ -162,7 +160,6 @@ async def send(request, **_): "client-id", "authcode", "https://localhost", - allow_multitenant_authentication=True, transport=Mock(send=send), ) token = await credential.get_token("scope") @@ -178,10 +175,7 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == first_token - async def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -198,15 +192,12 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = await credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - await credential.get_token("scope", tenant_id="un" + expected_tenant) + token = await credential.get_token("scope", tenant_id="un" + expected_tenant) + assert token.token == expected_token * 2 - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = await credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_certificate_credential.py b/sdk/identity/azure-identity/tests/test_certificate_credential.py index 389bf336297c..9215882abb3a 100644 --- a/sdk/identity/azure-identity/tests/test_certificate_credential.py +++ 
b/sdk/identity/azure-identity/tests/test_certificate_credential.py @@ -7,7 +7,8 @@ from azure.core.exceptions import ClientAuthenticationError from azure.core.pipeline.policies import ContentDecodePolicy, SansIOHTTPPolicy -from azure.identity import CertificateCredential, RegionalAuthority, TokenCachePersistenceOptions +from azure.identity import CertificateCredential, TokenCachePersistenceOptions +from azure.identity._enums import RegionalAuthority from azure.identity._constants import EnvironmentVariables from azure.identity._credentials.certificate import load_pkcs12_certificate from azure.identity._internal.user_agent import USER_AGENT @@ -151,17 +152,6 @@ def test_regional_authority(): for region in RegionalAuthority: mock_confidential_client.reset_mock() - with patch.dict("os.environ", {}, clear=True): - credential = CertificateCredential("tenant", "client-id", PEM_CERT_PATH, regional_authority=region) - with patch("msal.ConfidentialClientApplication", mock_confidential_client): - # must call get_token because the credential constructs the MSAL application lazily - credential.get_token("scope") - - assert mock_confidential_client.call_count == 1 - _, kwargs = mock_confidential_client.call_args - assert kwargs["azure_region"] == region - mock_confidential_client.reset_mock() - # region can be configured via environment variable with patch.dict("os.environ", {EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME: region}, clear=True): credential = CertificateCredential("tenant", "client-id", PEM_CERT_PATH) @@ -359,9 +349,7 @@ def test_certificate_arguments(): @pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS) -def test_allow_multitenant_authentication(cert_path, cert_password): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(cert_path, cert_password): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -382,7 +370,6 @@ 
def send(request, **_): "client-id", cert_path, password=cert_password, - allow_multitenant_authentication=True, transport=Mock(send=send), ) token = credential.get_token("scope") @@ -401,8 +388,6 @@ def send(request, **_): @pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS) def test_multitenant_authentication_backcompat(cert_path, cert_password): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -426,13 +411,5 @@ def send(request, **_): token = credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="un" + expected_tenant) - - # ...unless the compat switch is enabled - with patch.dict( - os.environ, {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True - ): - token = credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + token = credential.get_token("scope", tenant_id="un" + expected_tenant) + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_certificate_credential_async.py b/sdk/identity/azure-identity/tests/test_certificate_credential_async.py index a2338e78a910..8c5cc5b67efa 100644 --- a/sdk/identity/azure-identity/tests/test_certificate_credential_async.py +++ b/sdk/identity/azure-identity/tests/test_certificate_credential_async.py @@ -270,9 +270,7 @@ def test_certificate_arguments(): @pytest.mark.asyncio @pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS) -async def test_allow_multitenant_authentication(cert_path, cert_password): - """When allow_multitenant_authentication is True, the credential should 
respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(cert_path, cert_password): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -290,7 +288,6 @@ async def send(request, **_): "client-id", cert_path, password=cert_password, - allow_multitenant_authentication=True, transport=Mock(send=send), ) token = await credential.get_token("scope") @@ -310,8 +307,6 @@ async def send(request, **_): @pytest.mark.asyncio @pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS) async def test_multitenant_authentication_backcompat(cert_path, cert_password): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -332,13 +327,5 @@ async def send(request, **_): token = await credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - await credential.get_token("scope", tenant_id="un" + expected_tenant) - - # ...unless the compat switch is enabled - with patch.dict( - "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True - ): - token = await credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + token = await credential.get_token("scope", tenant_id="un" + expected_tenant) + assert token.token == expected_token * 2 diff --git a/sdk/identity/azure-identity/tests/test_cli_credential.py b/sdk/identity/azure-identity/tests/test_cli_credential.py index bac97fd4ac7c..6eb71b97e722 100644 --- a/sdk/identity/azure-identity/tests/test_cli_credential.py +++ b/sdk/identity/azure-identity/tests/test_cli_credential.py @@ -152,9 +152,7 @@ def test_timeout(): 
AzureCliCredential().get_token("scope") -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): default_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -174,7 +172,7 @@ def fake_check_output(command_line, **_): } ) - credential = AzureCliCredential(allow_multitenant_authentication=True) + credential = AzureCliCredential() with mock.patch(CHECK_OUTPUT, fake_check_output): token = credential.get_token("scope") assert token.token == first_token @@ -189,10 +187,7 @@ def fake_check_output(command_line, **_): token = credential.get_token("scope") assert token.token == first_token - def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -214,15 +209,8 @@ def fake_check_output(command_line, **_): token = credential.get_token("scope") assert token.token == expected_token - # specifying a tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="un" + expected_tenant) - - # ...unless the compat switch is enabled with mock.patch.dict( - "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"} + "os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"} ): token = credential.get_token("scope", tenant_id="un" + expected_tenant) - assert ( - token.token == expected_token - ), "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_cli_credential_async.py b/sdk/identity/azure-identity/tests/test_cli_credential_async.py index d5f5885f5d1f..2276a1bff3e5 100644 --- 
a/sdk/identity/azure-identity/tests/test_cli_credential_async.py +++ b/sdk/identity/azure-identity/tests/test_cli_credential_async.py @@ -185,9 +185,7 @@ async def test_timeout(): assert proc.kill.call_count == 1 -async def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(): default_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -208,7 +206,7 @@ async def fake_exec(*args, **_): ).encode() return mock.Mock(communicate=mock.Mock(return_value=get_completed_future((output, b""))), returncode=0) - credential = AzureCliCredential(allow_multitenant_authentication=True) + credential = AzureCliCredential() with mock.patch(SUBPROCESS_EXEC, fake_exec): token = await credential.get_token("scope") assert token.token == first_token @@ -223,10 +221,7 @@ async def fake_exec(*args, **_): token = await credential.get_token("scope") assert token.token == first_token - async def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) 
should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -249,13 +244,6 @@ async def fake_exec(*args, **_): token = await credential.get_token("scope") assert token.token == expected_token - # specifying a tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - await credential.get_token("scope", tenant_id="un" + expected_tenant) - - # ...unless the compat switch is enabled - with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = await credential.get_token("scope", tenant_id="un" + expected_tenant) - assert ( - token.token == expected_token - ), "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_client_secret_credential.py b/sdk/identity/azure-identity/tests/test_client_secret_credential.py index 7c694c617c60..2528bf0ad191 100644 --- a/sdk/identity/azure-identity/tests/test_client_secret_credential.py +++ b/sdk/identity/azure-identity/tests/test_client_secret_credential.py @@ -4,7 +4,8 @@ # ------------------------------------ from azure.core.exceptions import ClientAuthenticationError from azure.core.pipeline.policies import ContentDecodePolicy, SansIOHTTPPolicy -from azure.identity import ClientSecretCredential, RegionalAuthority, TokenCachePersistenceOptions +from azure.identity import ClientSecretCredential, TokenCachePersistenceOptions +from azure.identity._enums import RegionalAuthority from azure.identity._constants import EnvironmentVariables from azure.identity._internal.user_agent import USER_AGENT from msal import TokenCache @@ -128,17 +129,6 @@ def test_regional_authority(): for region in RegionalAuthority: 
mock_confidential_client.reset_mock() - with patch.dict("os.environ", {}, clear=True): - credential = ClientSecretCredential("tenant", "client-id", "secret", regional_authority=region) - with patch("msal.ConfidentialClientApplication", mock_confidential_client): - # must call get_token because the credential constructs the MSAL application lazily - credential.get_token("scope") - - assert mock_confidential_client.call_count == 1 - _, kwargs = mock_confidential_client.call_args - assert kwargs["azure_region"] == region - mock_confidential_client.reset_mock() - # region can be configured via environment variable with patch.dict("os.environ", {EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME: region}, clear=True): credential = ClientSecretCredential("tenant", "client-id", "secret") @@ -211,9 +201,7 @@ def test_cache_multiple_clients(): assert len(cache.find(TokenCache.CredentialType.ACCESS_TOKEN)) == 2 -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -230,7 +218,7 @@ def send(request, **_): return mock_response(json_payload=build_aad_response(access_token=token)) credential = ClientSecretCredential( - first_tenant, "client-id", "secret", allow_multitenant_authentication=True, transport=Mock(send=send) + first_tenant, "client-id", "secret", transport=Mock(send=send) ) token = credential.get_token("scope") assert token.token == first_token @@ -245,10 +233,7 @@ def send(request, **_): token = credential.get_token("scope") assert token.token == first_token - def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) 
should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -266,15 +251,9 @@ def send(request, **_): token = credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="un" + expected_tenant) - - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py b/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py index 60554ce9c90b..03e8d323c81d 100644 --- a/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py +++ b/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py @@ -251,9 +251,7 @@ async def test_cache_multiple_clients(): @pytest.mark.asyncio -async def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -267,7 +265,7 @@ async def send(request, **_): return mock_response(json_payload=build_aad_response(access_token=token)) credential = ClientSecretCredential( 
- first_tenant, "client-id", "secret", allow_multitenant_authentication=True, transport=Mock(send=send) + first_tenant, "client-id", "secret", transport=Mock(send=send) ) token = await credential.get_token("scope") assert token.token == first_token @@ -282,11 +280,8 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == first_token - @pytest.mark.asyncio async def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -301,15 +296,12 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = await credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - await credential.get_token("scope", tenant_id="un" + expected_tenant) + token = await credential.get_token("scope", tenant_id="un" + expected_tenant) + assert token.token == expected_token * 2 - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = await credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_default.py b/sdk/identity/azure-identity/tests/test_default.py index 8c8189996861..1c9120a9942a 100644 --- a/sdk/identity/azure-identity/tests/test_default.py +++ 
b/sdk/identity/azure-identity/tests/test_default.py @@ -402,36 +402,6 @@ def validate_client_id(credential): validate_client_id(mock_credential) -@pytest.mark.parametrize("expected_value", (True, False)) -def test_allow_multitenant_authentication(expected_value): - """the credential should pass "allow_multitenant_authentication" to the inner credentials which support it""" - - inner_credentials = { - credential: Mock() - for credential in ( - "AzureCliCredential", - "AzurePowerShellCredential", - "EnvironmentCredential", - "InteractiveBrowserCredential", - "ManagedIdentityCredential", # will ignore the argument - "SharedTokenCacheCredential", - ) - } - with patch.multiple(DefaultAzureCredential.__module__, **inner_credentials): - DefaultAzureCredential( - allow_multitenant_authentication=expected_value, exclude_interactive_browser_credential=False - ) - - for credential_name, mock_credential in inner_credentials.items(): - assert mock_credential.call_count == 1 - _, kwargs = mock_credential.call_args - - assert "allow_multitenant_authentication" in kwargs, ( - '"allow_multitenant_authentication" was not passed to ' + credential_name - ) - assert kwargs["allow_multitenant_authentication"] == expected_value - - def test_unexpected_kwarg(): """the credential shouldn't raise when given an unexpected keyword argument""" DefaultAzureCredential(foo=42) diff --git a/sdk/identity/azure-identity/tests/test_default_async.py b/sdk/identity/azure-identity/tests/test_default_async.py index 0f144350640c..e4ff1a9fcf11 100644 --- a/sdk/identity/azure-identity/tests/test_default_async.py +++ b/sdk/identity/azure-identity/tests/test_default_async.py @@ -312,33 +312,6 @@ def get_credential_for_shared_cache_test(expected_refresh_token, expected_access return DefaultAzureCredential(_cache=cache, transport=transport, **exclude_other_credentials, **kwargs) -@pytest.mark.parametrize("expected_value", (True, False)) -def test_allow_multitenant_authentication(expected_value): - """the 
credential should pass "allow_multitenant_authentication" to the inner credentials which support it""" - - inner_credentials = { - credential: Mock() - for credential in ( - "AzureCliCredential", - "AzurePowerShellCredential", - "EnvironmentCredential", - "ManagedIdentityCredential", # will ignore the argument - "SharedTokenCacheCredential", - ) - } - with patch.multiple(DefaultAzureCredential.__module__, **inner_credentials): - DefaultAzureCredential(allow_multitenant_authentication=expected_value) - - for credential_name, mock_credential in inner_credentials.items(): - assert mock_credential.call_count == 1 - _, kwargs = mock_credential.call_args - - assert "allow_multitenant_authentication" in kwargs, ( - '"allow_multitenant_authentication" was not passed to ' + credential_name - ) - assert kwargs["allow_multitenant_authentication"] == expected_value - - def test_unexpected_kwarg(): """the credential shouldn't raise when given an unexpected keyword argument""" DefaultAzureCredential(foo=42) diff --git a/sdk/identity/azure-identity/tests/test_interactive_credential.py b/sdk/identity/azure-identity/tests/test_interactive_credential.py index e82f76f6c5ba..ba6e407e3aec 100644 --- a/sdk/identity/azure-identity/tests/test_interactive_credential.py +++ b/sdk/identity/azure-identity/tests/test_interactive_credential.py @@ -282,9 +282,7 @@ def _request_token(self, *_, **__): assert record.username == username -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -312,7 +310,6 @@ def send(request, **_): credential = MockCredential( tenant_id=first_tenant, - allow_multitenant_authentication=True, request_token=request_token, transport=Mock(send=send), ) @@ -329,10 +326,7 @@ def send(request, **_): token = credential.get_token("scope") assert 
token.token == first_token - def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -360,15 +354,9 @@ def send(request, **_): token = credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="un" + expected_tenant) - - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_obo.py b/sdk/identity/azure-identity/tests/test_obo.py index 7cd402ee8c36..413b149be398 100644 --- a/sdk/identity/azure-identity/tests/test_obo.py +++ b/sdk/identity/azure-identity/tests/test_obo.py @@ -92,9 +92,7 @@ def test_obo_cert(self): credential.get_token(self.obo_settings["scope"]) -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -113,7 +111,7 @@ def send(request, **_): transport = Mock(send=Mock(wraps=send)) credential = OnBehalfOfCredential( - first_tenant, "client-id", "secret", 
"assertion", allow_multitenant_authentication=True, transport=transport + first_tenant, "client-id", "secret", "assertion", transport=transport ) token = credential.get_token("scope") assert token.token == first_token diff --git a/sdk/identity/azure-identity/tests/test_obo_async.py b/sdk/identity/azure-identity/tests/test_obo_async.py index 0bbaecb79150..c39957be0afd 100644 --- a/sdk/identity/azure-identity/tests/test_obo_async.py +++ b/sdk/identity/azure-identity/tests/test_obo_async.py @@ -69,9 +69,7 @@ async def test_context_manager(): @pytest.mark.asyncio -async def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -87,7 +85,7 @@ async def send(request, **_): transport = Mock(send=Mock(wraps=send)) credential = OnBehalfOfCredential( - first_tenant, "client-id", "secret", "assertion", allow_multitenant_authentication=True, transport=transport + first_tenant, "client-id", "secret", "assertion", transport=transport ) token = await credential.get_token("scope") assert token.token == first_token diff --git a/sdk/identity/azure-identity/tests/test_powershell_credential.py b/sdk/identity/azure-identity/tests/test_powershell_credential.py index cdf83543f220..3766b84cb6e8 100644 --- a/sdk/identity/azure-identity/tests/test_powershell_credential.py +++ b/sdk/identity/azure-identity/tests/test_powershell_credential.py @@ -243,9 +243,7 @@ def Popen(args, **kwargs): assert Fake.calls == 2 -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): first_token = "***" second_tenant = "second-tenant" second_token = first_token * 2 @@ -264,7 +262,7 @@ def fake_Popen(command, **_): communicate = 
Mock(return_value=(stdout, "")) return Mock(communicate=communicate, returncode=0) - credential = AzurePowerShellCredential(allow_multitenant_authentication=True) + credential = AzurePowerShellCredential() with patch(POPEN, fake_Popen): token = credential.get_token("scope") assert token.token == first_token @@ -276,10 +274,7 @@ def fake_Popen(command, **_): token = credential.get_token("scope") assert token.token == first_token - def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_token = "***" def fake_Popen(command, **_): @@ -300,13 +295,6 @@ def fake_Popen(command, **_): token = credential.get_token("scope") assert token.token == expected_token - # specifying a tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="some tenant") - - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = credential.get_token("scope", tenant_id="some tenant") - assert ( - token.token == expected_token - ), "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_powershell_credential_async.py b/sdk/identity/azure-identity/tests/test_powershell_credential_async.py index 2e67d6c19906..0dcc56267f78 100644 --- a/sdk/identity/azure-identity/tests/test_powershell_credential_async.py +++ b/sdk/identity/azure-identity/tests/test_powershell_credential_async.py @@ -244,9 +244,7 @@ async def mock_exec(*args, **kwargs): assert calls == 2 -async def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential 
should respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(): first_token = "***" second_tenant = "second-tenant" second_token = first_token * 2 @@ -266,7 +264,7 @@ async def fake_exec(*args, **_): communicate = Mock(return_value=get_completed_future((stdout.encode(), b""))) return Mock(communicate=communicate, returncode=0) - credential = AzurePowerShellCredential(allow_multitenant_authentication=True) + credential = AzurePowerShellCredential() with patch(CREATE_SUBPROCESS_EXEC, fake_exec): token = await credential.get_token("scope") assert token.token == first_token @@ -278,10 +276,7 @@ async def fake_exec(*args, **_): token = await credential.get_token("scope") assert token.token == first_token - async def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_token = "***" async def fake_exec(*args, **_): @@ -302,13 +297,6 @@ async def fake_exec(*args, **_): token = await credential.get_token("scope") assert token.token == expected_token - # specifying a tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - await credential.get_token("scope", tenant_id="some tenant") - - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = await credential.get_token("scope", tenant_id="some tenant") - assert ( - token.token == expected_token - ), "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_shared_cache_credential.py b/sdk/identity/azure-identity/tests/test_shared_cache_credential.py index 5081825cebb5..287815c00cce 100644 --- 
a/sdk/identity/azure-identity/tests/test_shared_cache_credential.py +++ b/sdk/identity/azure-identity/tests/test_shared_cache_credential.py @@ -825,9 +825,7 @@ def test_claims_challenge(): assert kwargs["claims_challenge"] == expected_claims -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): default_tenant = "organizations" first_token = "***" second_tenant = "second-tenant" @@ -851,7 +849,7 @@ def send(request, **_): cache = populated_cache(expected_account) credential = SharedTokenCacheCredential( - allow_multitenant_authentication=True, authority=authority, transport=Mock(send=send), _cache=cache + authority=authority, transport=Mock(send=send), _cache=cache ) token = credential.get_token("scope") assert token.token == first_token @@ -867,56 +865,7 @@ def send(request, **_): assert token.token == first_token -def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) 
should raise when allow_multitenant_authentication is False (the default)""" - - default_tenant = "organizations" - expected_token = "***" - - def send(request, **_): - parsed = urlparse(request.url) - tenant_id = parsed.path.split("/")[1] - assert tenant_id == default_tenant - return mock_response( - json_payload=build_aad_response( - access_token=expected_token, - id_token_claims=id_token_claims(aud="...", iss="...", sub="..."), - ) - ) - - tenant_id = "tenant-id" - client_id = "client-id" - authority = "localhost" - object_id = "object-id" - username = "me" - - expected_account = get_account_event( - username, object_id, tenant_id, authority=authority, client_id=client_id, refresh_token="**" - ) - cache = populated_cache(expected_account) - - credential = SharedTokenCacheCredential(authority=authority, transport=Mock(send=send), _cache=cache) - - token = credential.get_token("scope") - assert token.token == expected_token - - # explicitly specifying the configured tenant is okay - token = credential.get_token("scope", tenant_id=default_tenant) - assert token.token == expected_token - - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="some tenant") - - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): - token = credential.get_token("scope", tenant_id="some tenant") - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" - - -def test_allow_multitenant_authentication_auth_record(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication_auth_record(): default_tenant = "organizations" first_token = "***" second_tenant = "second-tenant" @@ -947,7 +896,6 @@ def send(request, **_): cache = 
populated_cache(expected_account) credential = SharedTokenCacheCredential( - allow_multitenant_authentication=True, authority=authority, transport=Mock(send=send), authentication_record=record, @@ -967,64 +915,6 @@ def send(request, **_): assert token.token == first_token -def test_multitenant_authentication_not_allowed_authentication_record(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - - default_tenant = "organizations" - expected_token = "***" - - authority = AzureAuthorityHosts.AZURE_PUBLIC_CLOUD - object_id = "object-id" - home_account_id = object_id + "." + default_tenant - record = AuthenticationRecord(default_tenant, "client-id", authority, home_account_id, "user") - - def send(request, **_): - parsed = urlparse(request.url) - tenant_id = parsed.path.split("/")[1] - if "/oauth2/v2.0/token" not in request.url: - return get_discovery_response("https://{}/{}".format(parsed.netloc, tenant_id)) - - assert tenant_id == default_tenant - return mock_response( - json_payload=build_aad_response( - access_token=expected_token, - id_token_claims=id_token_claims(aud="...", iss="...", sub="..."), - ) - ) - - expected_account = get_account_event( - record.username, - object_id, - record.tenant_id, - authority=record.authority, - client_id=record.client_id, - refresh_token="**", - ) - cache = populated_cache(expected_account) - - credential = SharedTokenCacheCredential( - authority=authority, transport=Mock(send=send), authentication_record=record, _cache=cache - ) - - token = credential.get_token("scope") - assert token.token == expected_token - - # explicitly specifying the configured tenant is okay - token = credential.get_token("scope", tenant_id=default_tenant) - assert token.token == expected_token - - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="some tenant") - - # 
...unless the compat switch is enabled - with patch.dict( - "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True - ): - token = credential.get_token("scope", tenant_id="some tenant") - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" - - def get_account_event( username, uid, utid, authority=None, client_id="client-id", refresh_token="refresh-token", scopes=None, **kwargs ): @@ -1054,3 +944,41 @@ def populated_cache(*accounts): cache.add(account) cache.add = lambda *_, **__: None # prevent anything being added to the cache return cache + +def test_multitenant_authentication_not_allowed(): + default_tenant = "organizations" + expected_token = "***" + + def send(request, **_): + parsed = urlparse(request.url) + tenant_id = parsed.path.split("/")[1] + assert tenant_id == default_tenant + return mock_response( + json_payload=build_aad_response( + access_token=expected_token, + id_token_claims=id_token_claims(aud="...", iss="...", sub="..."), + ) + ) + + tenant_id = "tenant-id" + client_id = "client-id" + authority = "localhost" + object_id = "object-id" + username = "me" + + expected_account = get_account_event( + username, object_id, tenant_id, authority=authority, client_id=client_id, refresh_token="**" + ) + cache = populated_cache(expected_account) + + credential = SharedTokenCacheCredential(authority=authority, transport=Mock(send=send), _cache=cache) + + token = credential.get_token("scope") + assert token.token == expected_token + + token = credential.get_token("scope", tenant_id=default_tenant) + assert token.token == expected_token + + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): + token = credential.get_token("scope", tenant_id="some tenant") + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py 
b/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py index a6d7f0d67d60..9346755360f1 100644 --- a/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py +++ b/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py @@ -606,9 +606,7 @@ async def test_initialization(): @pytest.mark.asyncio -async def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(): first_token = "***" second_tenant = "second-tenant" second_token = first_token * 2 @@ -630,7 +628,7 @@ async def send(request, **_): cache = populated_cache(expected_account) credential = SharedTokenCacheCredential( - allow_multitenant_authentication=True, authority=authority, transport=Mock(send=send), _cache=cache + authority=authority, transport=Mock(send=send), _cache=cache ) token = await credential.get_token("scope") assert token.token == first_token @@ -645,11 +643,8 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == first_token - @pytest.mark.asyncio async def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) 
should raise when allow_multitenant_authentication is False (the default)""" - default_tenant = "organizations" expected_token = "***" @@ -675,15 +670,9 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = await credential.get_token("scope", tenant_id=default_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - await credential.get_token("scope", tenant_id="some tenant") - - # ...unless the compat switch is enabled - with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = await credential.get_token("scope", tenant_id="some tenant") - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_vscode_credential.py b/sdk/identity/azure-identity/tests/test_vscode_credential.py index e6db05f56e5f..befe86ad7ce3 100644 --- a/sdk/identity/azure-identity/tests/test_vscode_credential.py +++ b/sdk/identity/azure-identity/tests/test_vscode_credential.py @@ -277,9 +277,7 @@ def test_no_user_settings(): assert transport.send.call_count == 1 -def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -293,7 +291,7 @@ def send(request, **_): return mock_response(json_payload=build_aad_response(access_token=token)) credential = get_credential( - tenant_id=first_tenant, allow_multitenant_authentication=True, 
transport=mock.Mock(send=send) + tenant_id=first_tenant, transport=mock.Mock(send=send) ) with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"): token = credential.get_token("scope") @@ -310,10 +308,7 @@ def send(request, **_): token = credential.get_token("scope") assert token.token == first_token - def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -329,15 +324,12 @@ def send(request, **_): token = credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - credential.get_token("scope", tenant_id="un" + expected_tenant) + token = credential.get_token("scope", tenant_id="un" + expected_tenant) + assert token.token == expected_token * 2 - # ...unless the compat switch is enabled - with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token diff --git a/sdk/identity/azure-identity/tests/test_vscode_credential_async.py b/sdk/identity/azure-identity/tests/test_vscode_credential_async.py index 40c996e39957..ff5716c7d9ad 100644 --- a/sdk/identity/azure-identity/tests/test_vscode_credential_async.py +++ b/sdk/identity/azure-identity/tests/test_vscode_credential_async.py @@ -268,9 +268,7 @@ async def test_no_user_settings(): 
@pytest.mark.asyncio -async def test_allow_multitenant_authentication(): - """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)""" - +async def test_multitenant_authentication(): first_tenant = "first-tenant" first_token = "***" second_tenant = "second-tenant" @@ -284,7 +282,7 @@ async def send(request, **_): return mock_response(json_payload=build_aad_response(access_token=token)) credential = get_credential( - tenant_id=first_tenant, allow_multitenant_authentication=True, transport=mock.Mock(send=send) + tenant_id=first_tenant, transport=mock.Mock(send=send) ) with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"): token = await credential.get_token("scope") @@ -301,11 +299,8 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == first_token - @pytest.mark.asyncio async def test_multitenant_authentication_not_allowed(): - """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)""" - expected_tenant = "expected-tenant" expected_token = "***" @@ -321,15 +316,12 @@ async def send(request, **_): token = await credential.get_token("scope") assert token.token == expected_token - # explicitly specifying the configured tenant is okay token = await credential.get_token("scope", tenant_id=expected_tenant) assert token.token == expected_token - # but any other tenant should get an error - with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"): - await credential.get_token("scope", tenant_id="un" + expected_tenant) + token = await credential.get_token("scope", tenant_id="un" + expected_tenant) + assert token.token == expected_token * 2 - # ...unless the compat switch is enabled - with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}): + with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}): token = 
await credential.get_token("scope", tenant_id="un" + expected_tenant) - assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled" + assert token.token == expected_token