From 4112ee6e0e2116f9a9d4a7c289a71241cad1cfb6 Mon Sep 17 00:00:00 2001
From: Bryan Van de Ven
Date: Thu, 7 May 2020 16:17:46 -0700
Subject: [PATCH 01/20] add regenerated swagger

---
 .../documents/_index/_generated/__init__.py   |    2 +-
 .../_index/_generated/_configuration.py       |    2 +-
 .../_index/_generated/_search_index_client.py |    2 +-
 .../_index/_generated/aio/__init__.py         |    2 +-
 .../_generated/aio/_configuration_async.py    |    2 +-
 .../aio/_search_index_client_async.py         |    2 +-
 .../aio/operations_async/__init__.py          |    2 +-
 .../_documents_operations_async.py            |   49 +-
 .../_index/_generated/models/__init__.py      |    8 +-
 .../_index/_generated/models/_models.py       |   28 +-
 .../_index/_generated/models/_models_py3.py   |   28 +-
 .../models/_search_index_client_enums.py      |   24 +-
 .../_index/_generated/operations/__init__.py  |    2 +-
 .../operations/_documents_operations.py       |   49 +-
 .../_data_sources_operations_async.py         |   92 +-
 .../_indexers_operations_async.py             |  102 +-
 .../_indexes_operations_async.py              |  190 +-
 .../_skillsets_operations_async.py            |   92 +-
 .../_synonym_maps_operations_async.py         |   48 +-
 .../_service/_generated/models/__init__.py    |  136 +-
 .../_service/_generated/models/_models.py     | 2184 ++++++++--------
 .../_service/_generated/models/_models_py3.py | 2303 +++++++++--------
 .../models/_search_service_client_enums.py    |  556 ++--
 .../operations/_data_sources_operations.py    |   92 +-
 .../operations/_indexers_operations.py        |  102 +-
 .../operations/_indexes_operations.py         |  188 +-
 .../operations/_skillsets_operations.py       |   92 +-
 .../operations/_synonym_maps_operations.py    |   48 +-
 28 files changed, 3296 insertions(+), 3131 deletions(-)

diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py
index ac06514f5327..7363c23e25cd 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py
index 3010c29cca9f..4ad630688a33 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py
index 7f112a9888de..8182e533ba81 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py
index a06ffca12355..921a60ecf1d2 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py
index 2fe06bf7f544..54942faa0d61 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py
index 06ec78b349c8..36f6ad7726e5 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py
index 6b51d112c132..c3fee199ca19 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py
index dc288eb01147..0a25976ad253 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py
@@ -1,12 +1,12 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
 import warnings
 
-from azure.core.exceptions import map_error
+from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
 from azure.core.pipeline import PipelineResponse
 from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
 
@@ -52,7 +52,7 @@ async def count(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[int]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -84,7 +84,8 @@ async def count(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('long', pipeline_response)
 
@@ -116,7 +117,7 @@ async def search_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _include_total_result_count = None
         _facets = None
@@ -215,7 +216,8 @@ async def search_get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
 
@@ -243,7 +245,7 @@ async def search_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -280,7 +282,8 @@ async def search_post(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
 
@@ -312,7 +315,7 @@ async def get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[object]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -347,7 +350,8 @@ async def get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('object', pipeline_response)
 
@@ -383,7 +387,7 @@ async def suggest_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SuggestDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _filter = None
         _use_fuzzy_matching = None
@@ -454,7 +458,8 @@ async def suggest_get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
 
@@ -482,7 +487,7 @@ async def suggest_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SuggestDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -519,7 +524,8 @@ async def suggest_post(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
 
@@ -547,7 +553,7 @@ async def index(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.IndexDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -584,7 +590,8 @@ async def index(
 
         if response.status_code not in [200, 207]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = None
         if response.status_code == 200:
@@ -624,7 +631,7 @@ async def autocomplete_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.AutocompleteResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         _autocomplete_mode = None
@@ -691,7 +698,8 @@ async def autocomplete_get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('AutocompleteResult', pipeline_response)
 
@@ -719,7 +727,7 @@ async def autocomplete_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.AutocompleteResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -756,7 +764,8 @@ async def autocomplete_post(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('AutocompleteResult', pipeline_response)
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py
index ad760a1d771e..5971f054dfba 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
@@ -16,7 +16,7 @@
     from ._models_py3 import IndexingResult
     from ._models_py3 import RequestOptions
     from ._models_py3 import SearchDocumentsResult
-    from ._models_py3 import SearchError, SearchErrorException
+    from ._models_py3 import SearchError
     from ._models_py3 import SearchOptions
     from ._models_py3 import SearchRequest
     from ._models_py3 import SearchResult
@@ -36,7 +36,7 @@
     from ._models import IndexingResult  # type: ignore
     from ._models import RequestOptions  # type: ignore
     from ._models import SearchDocumentsResult  # type: ignore
-    from ._models import SearchError, SearchErrorException  # type: ignore
+    from ._models import SearchError  # type: ignore
     from ._models import SearchOptions  # type: ignore
     from ._models import SearchRequest  # type: ignore
     from ._models import SearchResult  # type: ignore
@@ -64,7 +64,7 @@
     'IndexingResult',
     'RequestOptions',
     'SearchDocumentsResult',
-    'SearchError', 'SearchErrorException',
+    'SearchError',
     'SearchOptions',
     'SearchRequest',
     'SearchResult',
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py
index 88b4aacde572..69693a04bf57 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
@@ -449,31 +449,6 @@ def __init__(
         self.next_link = None
 
 
-class SearchErrorException(HttpResponseError):
-    """Server responded with exception of type: 'SearchError'.
-
-    :param response: Server response to be deserialized.
-    :param error_model: A deserialized model of the response body as model.
-    """
-
-    def __init__(self, response, error_model):
-        self.error = error_model
-        super(SearchErrorException, self).__init__(response=response, error_model=error_model)
-
-    @classmethod
-    def from_response(cls, response, deserialize):
-        """Deserialize this response as this exception, or a subclass of this exception.
-
-        :param response: Server response to be deserialized.
-        :param deserialize: A deserializer
-        """
-        model_name = 'SearchError'
-        error = deserialize(model_name, response)
-        if error is None:
-            error = deserialize.dependencies[model_name]()
-        return error._EXCEPTION_TYPE(response, error)
-
-
 class SearchError(msrest.serialization.Model):
     """Describes an error condition for the Azure Cognitive Search API.
 
@@ -488,7 +463,6 @@ class SearchError(msrest.serialization.Model):
     :ivar details: An array of details about specific errors that led to this reported error.
     :vartype details: list[~search_index_client.models.SearchError]
     """
-    _EXCEPTION_TYPE = SearchErrorException
 
     _validation = {
         'code': {'readonly': True},
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py
index 4b8f7bda6f7f..7e394ea69d39 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
@@ -480,31 +480,6 @@ def __init__(
         self.next_link = None
 
 
-class SearchErrorException(HttpResponseError):
-    """Server responded with exception of type: 'SearchError'.
-
-    :param response: Server response to be deserialized.
-    :param error_model: A deserialized model of the response body as model.
-    """
-
-    def __init__(self, response, error_model):
-        self.error = error_model
-        super(SearchErrorException, self).__init__(response=response, error_model=error_model)
-
-    @classmethod
-    def from_response(cls, response, deserialize):
-        """Deserialize this response as this exception, or a subclass of this exception.
-
-        :param response: Server response to be deserialized.
-        :param deserialize: A deserializer
-        """
-        model_name = 'SearchError'
-        error = deserialize(model_name, response)
-        if error is None:
-            error = deserialize.dependencies[model_name]()
-        return error._EXCEPTION_TYPE(response, error)
-
-
 class SearchError(msrest.serialization.Model):
     """Describes an error condition for the Azure Cognitive Search API.
 
@@ -519,7 +494,6 @@ class SearchError(msrest.serialization.Model):
     :ivar details: An array of details about specific errors that led to this reported error.
     :vartype details: list[~search_index_client.models.SearchError]
     """
-    _EXCEPTION_TYPE = SearchErrorException
 
     _validation = {
         'code': {'readonly': True},
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py
index ec3e46a89f5a..f8c0578dc65c 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
@@ -10,23 +10,23 @@ class IndexActionType(str, Enum):
     """The operation to perform on a document in an indexing batch.
     """
 
-    upload = "upload"
-    merge = "merge"
-    merge_or_upload = "mergeOrUpload"
-    delete = "delete"
+    upload = "upload"  #: Inserts the document into the index if it is new and updates it if it exists. All fields are replaced in the update case.
+    merge = "merge"  #: Merges the specified field values with an existing document. If the document does not exist, the merge will fail. Any field you specify in a merge will replace the existing field in the document. This also applies to collections of primitive and complex types.
+    merge_or_upload = "mergeOrUpload"  #: Behaves like merge if a document with the given key already exists in the index. If the document does not exist, it behaves like upload with a new document.
+    delete = "delete"  #: Removes the specified document from the index. Any field you specify in a delete operation other than the key field will be ignored. If you want to remove an individual field from a document, use merge instead and set the field explicitly to null.
 
 class QueryType(str, Enum):
 
-    simple = "simple"
-    full = "full"
+    simple = "simple"  #: Uses the simple query syntax for searches. Search text is interpreted using a simple query language that allows for symbols such as +, * and "". Queries are evaluated across all searchable fields by default, unless the searchFields parameter is specified.
+    full = "full"  #: Uses the full Lucene query syntax for searches. Search text is interpreted using the Lucene query language which allows field-specific and weighted searches, as well as other advanced features.
 
 class SearchMode(str, Enum):
 
-    any = "any"
-    all = "all"
+    any = "any"  #: Any of the search terms must be matched in order to count the document as a match.
+    all = "all"  #: All of the search terms must be matched in order to count the document as a match.
 
 class AutocompleteMode(str, Enum):
 
-    one_term = "oneTerm"
-    two_terms = "twoTerms"
-    one_term_with_context = "oneTermWithContext"
+    one_term = "oneTerm"  #: Only one term is suggested. If the query has two terms, only the last term is completed. For example, if the input is 'washington medic', the suggested terms could include 'medicaid', 'medicare', and 'medicine'.
+    two_terms = "twoTerms"  #: Matching two-term phrases in the index will be suggested. For example, if the input is 'medic', the suggested terms could include 'medicare coverage' and 'medical assistant'.
+    one_term_with_context = "oneTermWithContext"  #: Completes the last term in a query with two or more terms, where the last two terms are a phrase that exists in the index. For example, if the input is 'washington medic', the suggested terms could include 'washington medicaid' and 'washington medical'.
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py
index 1de4dc8bc765..bcb01fb453e2 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py
@@ -1,6 +1,6 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py
index 718835b3c195..923e4fb4c95b 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py
@@ -1,12 +1,12 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
 import warnings
 
-from azure.core.exceptions import map_error
+from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
 from azure.core.pipeline import PipelineResponse
 from azure.core.pipeline.transport import HttpRequest, HttpResponse
 
@@ -53,7 +53,7 @@ def count(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[int]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -85,7 +85,8 @@ def count(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('long', pipeline_response)
 
@@ -118,7 +119,7 @@ def search_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _include_total_result_count = None
         _facets = None
@@ -217,7 +218,8 @@ def search_get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
 
@@ -246,7 +248,7 @@ def search_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -283,7 +285,8 @@ def search_post(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
 
@@ -316,7 +319,7 @@ def get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[object]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -351,7 +354,8 @@ def get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('object', pipeline_response)
 
@@ -388,7 +392,7 @@ def suggest_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SuggestDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _filter = None
         _use_fuzzy_matching = None
@@ -459,7 +463,8 @@ def suggest_get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
 
@@ -488,7 +493,7 @@ def suggest_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SuggestDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -525,7 +530,8 @@ def suggest_post(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
 
@@ -554,7 +560,7 @@ def index(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.IndexDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -591,7 +597,8 @@ def index(
 
         if response.status_code not in [200, 207]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = None
         if response.status_code == 200:
@@ -632,7 +639,7 @@ def autocomplete_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.AutocompleteResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         _autocomplete_mode = None
@@ -699,7 +706,8 @@ def autocomplete_get(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('AutocompleteResult', pipeline_response)
 
@@ -728,7 +736,7 @@ def autocomplete_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.AutocompleteResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
         if request_options is not None:
@@ -765,7 +773,8 @@ def autocomplete_post(
 
         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)
 
         deserialized = self._deserialize('AutocompleteResult', pipeline_response)
 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py
index 431a492fd958..bb8449e817bd 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py
@@ -40,35 +40,35 @@ def __init__(self, client, config, serializer, deserializer) -> None:
     async def create_or_update(
         self,
         data_source_name: str,
-        data_source: "models.DataSource",
+        data_source: "models.SearchIndexerDataSource",
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
-    ) -> "models.DataSource":
+    ) -> "models.SearchIndexerDataSource":
         """Creates a new datasource or updates a datasource if it already exists.
 
         :param data_source_name: The name of the datasource to create or update.
         :type data_source_name: str
         :param data_source: The definition of the datasource to create or update.
-        :type data_source: ~search_service_client.models.DataSource
+        :type data_source: ~search_service_client.models.SearchIndexerDataSource
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: DataSource or the result of cls(response)
-        :rtype: ~search_service_client.models.DataSource or ~search_service_client.models.DataSource
+        :return: SearchIndexerDataSource or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexerDataSource or ~search_service_client.models.SearchIndexerDataSource
         :raises: ~azure.core.exceptions.HttpResponseError
        """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSource"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerDataSource"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         prefer = "return=representation"
@@ -90,17 +90,17 @@ async def create_or_update(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
         header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
         header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')
 
         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(data_source, 'DataSource')
+        body_content = self._serialize.body(data_source, 'SearchIndexerDataSource')
         body_content_kwargs['content'] = body_content
         request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
@@ -114,10 +114,10 @@ async def create_or_update(
 
         deserialized = None
         if response.status_code == 200:
-            deserialized = self._deserialize('DataSource', pipeline_response)
+            deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
 
         if response.status_code == 201:
-            deserialized = self._deserialize('DataSource', pipeline_response)
+            deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
 
         if cls:
             return cls(pipeline_response, deserialized, {})
@@ -128,18 +128,23 @@ async def create_or_update(
     async def delete(
         self,
         data_source_name: str,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
     ) -> None:
         """Deletes a datasource.
 
         :param data_source_name: The name of the datasource to delete.
         :type data_source_name: str
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
@@ -149,11 +154,6 @@ async def delete(
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"
@@ -174,10 +174,10 @@ async def delete(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
 
         # Construct and send request
         request = self._client.delete(url, query_parameters, header_parameters)
@@ -199,7 +199,7 @@ async def get(
         data_source_name: str,
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.DataSource":
+    ) -> "models.SearchIndexerDataSource":
         """Retrieves a datasource definition.
 
         :param data_source_name: The name of the datasource to retrieve.
@@ -207,11 +207,11 @@ async def get(
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: DataSource or the result of cls(response)
-        :rtype: ~search_service_client.models.DataSource
+        :return: SearchIndexerDataSource or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexerDataSource
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSource"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerDataSource"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
@@ -247,7 +247,7 @@ async def get(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)
 
-        deserialized = self._deserialize('DataSource', pipeline_response)
+        deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
 
         if cls:
             return cls(pipeline_response, deserialized, {})
@@ -321,22 +321,22 @@ async def list(
     async def create(
         self,
-        data_source: "models.DataSource",
+        data_source: "models.SearchIndexerDataSource",
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.DataSource":
+    ) -> "models.SearchIndexerDataSource":
         """Creates a new datasource.
 
         :param data_source: The definition of the datasource to create.
-        :type data_source: ~search_service_client.models.DataSource
+        :type data_source: ~search_service_client.models.SearchIndexerDataSource
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: DataSource or the result of cls(response)
-        :rtype: ~search_service_client.models.DataSource
+        :return: SearchIndexerDataSource or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexerDataSource
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSource"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerDataSource"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
@@ -364,7 +364,7 @@ async def create(
 
         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(data_source, 'DataSource')
+        body_content = self._serialize.body(data_source, 'SearchIndexerDataSource')
         body_content_kwargs['content'] = body_content
         request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
@@ -376,7 +376,7 @@ async def create(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)
 
-        deserialized = self._deserialize('DataSource', pipeline_response)
+        deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
 
         if cls:
             return cls(pipeline_response, deserialized, {})
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py
index acd91482da73..0d9e0ff7c835 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py
@@ -154,35 +154,35 @@ async def run(
     async def create_or_update(
         self,
         indexer_name: str,
-        indexer: "models.Indexer",
+        indexer: "models.SearchIndexer",
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
-    ) -> "models.Indexer":
+    ) -> "models.SearchIndexer":
         """Creates a new indexer or updates an indexer if it already exists.
 
         :param indexer_name: The name of the indexer to create or update.
         :type indexer_name: str
         :param indexer: The definition of the indexer to create or update.
-        :type indexer: ~search_service_client.models.Indexer
+        :type indexer: ~search_service_client.models.SearchIndexer
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: Indexer or the result of cls(response)
-        :rtype: ~search_service_client.models.Indexer or ~search_service_client.models.Indexer
+        :return: SearchIndexer or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexer or ~search_service_client.models.SearchIndexer
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.Indexer"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexer"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         prefer = "return=representation"
@@ -204,17 +204,17 @@ async def create_or_update(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
         header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
         header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')
 
         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(indexer, 'Indexer')
+        body_content = self._serialize.body(indexer, 'SearchIndexer')
         body_content_kwargs['content'] = body_content
         request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
@@ -228,10 +228,10 @@ async def create_or_update(
 
         deserialized = None
         if response.status_code == 200:
-            deserialized = self._deserialize('Indexer', pipeline_response)
+            deserialized = self._deserialize('SearchIndexer', pipeline_response)
 
         if response.status_code == 201:
-            deserialized = self._deserialize('Indexer', pipeline_response)
+            deserialized = self._deserialize('SearchIndexer', pipeline_response)
 
         if cls:
             return cls(pipeline_response, deserialized, {})
@@ -242,18 +242,23 @@ async def create_or_update(
     async def delete(
         self,
         indexer_name: str,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
     ) -> None:
         """Deletes an indexer.
 
         :param indexer_name: The name of the indexer to delete.
         :type indexer_name: str
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
@@ -263,11 +268,6 @@ async def delete(
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"
@@ -288,10 +288,10 @@ async def delete(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
 
         # Construct and send request
         request = self._client.delete(url, query_parameters, header_parameters)
@@ -313,7 +313,7 @@ async def get(
         indexer_name: str,
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.Indexer":
+    ) -> "models.SearchIndexer":
         """Retrieves an indexer definition.
 
         :param indexer_name: The name of the indexer to retrieve.
@@ -321,11 +321,11 @@ async def get(
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: Indexer or the result of cls(response)
-        :rtype: ~search_service_client.models.Indexer
+        :return: SearchIndexer or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexer
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.Indexer"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexer"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
@@ -361,7 +361,7 @@ async def get(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)
 
-        deserialized = self._deserialize('Indexer', pipeline_response)
+        deserialized = self._deserialize('SearchIndexer', pipeline_response)
 
         if cls:
             return cls(pipeline_response, deserialized, {})
@@ -435,22 +435,22 @@ async def list(
     async def create(
         self,
-        indexer: "models.Indexer",
+        indexer: "models.SearchIndexer",
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.Indexer":
+    ) -> "models.SearchIndexer":
         """Creates a new indexer.
 
         :param indexer: The definition of the indexer to create.
-        :type indexer: ~search_service_client.models.Indexer
+        :type indexer: ~search_service_client.models.SearchIndexer
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: Indexer or the result of cls(response)
-        :rtype: ~search_service_client.models.Indexer
+        :return: SearchIndexer or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexer
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.Indexer"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexer"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
 
         _x_ms_client_request_id = None
@@ -478,7 +478,7 @@ async def create(
 
         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(indexer, 'Indexer')
+        body_content = self._serialize.body(indexer, 'SearchIndexer')
         body_content_kwargs['content'] = body_content
         request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
@@ -490,7 +490,7 @@ async def create(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)
 
-        deserialized = self._deserialize('Indexer', pipeline_response)
+        deserialized = self._deserialize('SearchIndexer', pipeline_response)
 
         if cls:
             return cls(pipeline_response, deserialized, {})
@@ -503,7 +503,7 @@ async def get_status(
         indexer_name: str,
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.IndexerExecutionInfo":
+    ) -> "models.SearchIndexerStatus":
         """Returns the current status and execution history of an indexer.
 
         :param indexer_name: The name of the indexer for which to retrieve status.
@@ -511,11 +511,11 @@ async def get_status(
         :param request_options: Parameter group.
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: IndexerExecutionInfo or the result of cls(response) - :rtype: ~search_service_client.models.IndexerExecutionInfo + :return: SearchIndexerStatus or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerStatus :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.IndexerExecutionInfo"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerStatus"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -551,7 +551,7 @@ async def get_status( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('IndexerExecutionInfo', pipeline_response) + deserialized = self._deserialize('SearchIndexerStatus', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py index ddebc3f096c9..6993461cbe18 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py @@ -6,6 +6,7 @@ from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings +from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest @@ -39,22 +40,22 @@ def __init__(self, client, config, serializer, deserializer) -> None: async def create( self, - index: "models.Index", + index: "models.SearchIndex", request_options: Optional["models.RequestOptions"] = None, **kwargs - ) -> "models.Index": + ) -> "models.SearchIndex": """Creates a new search index. :param index: The definition of the index to create. - :type index: ~search_service_client.models.Index + :type index: ~search_service_client.models.SearchIndex :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -82,7 +83,7 @@ async def create( # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -94,7 +95,7 @@ async def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -102,7 +103,7 @@ async def create( return deserialized create.metadata = {'url': '/indexes'} - async def list( + def list( self, select: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, @@ -111,8 +112,8 @@ async def list( """Lists all indexes available for a search service. :param select: Selects which top-level properties of the index definitions to retrieve. - Specified as a comma-separated list of JSON property names, or '*' for all properties. The - default is all properties. + Specified as a comma-separated list of JSON property names, or '*' for all properties. The + default is all properties. :type select: str :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions @@ -129,82 +130,101 @@ async def list( _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _x_ms_client_request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - header_parameters['Accept'] = 'application/json' - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(models.SearchError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ListIndexesResult', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + else: + url = next_link + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('ListIndexesResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + error = self._deserialize(models.SearchError, response) + map_error(status_code=response.status_code, response=response, error_map=error_map, model=error) + 
raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) list.metadata = {'url': '/indexes'} async def create_or_update( self, index_name: str, - index: "models.Index", + index: "models.SearchIndex", allow_index_downtime: Optional[bool] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs - ) -> "models.Index": + ) -> "models.SearchIndex": """Creates a new search index or updates an index if it already exists. :param index_name: The definition of the index to create or update. :type index_name: str :param index: The definition of the index to create or update. - :type index: ~search_service_client.models.Index + :type index: ~search_service_client.models.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. :type allow_index_downtime: bool + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
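Note that besides the Index to SearchIndex rename, the list operation above also changes shape: it is now a plain def returning AsyncItemPaged instead of a coroutine returning a ListIndexesResult, so callers iterate the pager rather than awaiting a result. A consumption sketch under the same assumed client wiring as the indexer example:

async def print_index_names(client) -> None:
    # list() is not awaited; it returns an AsyncItemPaged that lazily
    # drives the prepare_request/extract_data/get_next trio shown above,
    # one page per service round trip.
    async for index in client.indexes.list(select="name"):
        print(index.name)
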
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index or ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndex or ~search_service_client.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" @@ -228,17 +248,17 @@ async def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -252,10 +272,10 @@ async def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -266,18 +286,23 @@ async def create_or_update( async def delete( self, index_name: str, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> None: - """Deletes a search index and all the documents it contains. + """Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. :param index_name: The name of the index to delete. 
:type index_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -287,11 +312,6 @@ async def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -312,10 +332,10 @@ async def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -337,7 +357,7 @@ async def get( index_name: str, request_options: Optional["models.RequestOptions"] = None, **kwargs - ) -> "models.Index": + ) -> "models.SearchIndex": """Retrieves an index definition. :param index_name: The name of the index to retrieve. @@ -345,11 +365,11 @@ async def get( :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -385,7 +405,7 @@ async def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py index b286e25f0b34..168e0d46ec59 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py @@ -40,36 +40,36 @@ def __init__(self, client, config, serializer, deserializer) -> None: async def create_or_update( self, skillset_name: str, - skillset: "models.Skillset", + skillset: "models.SearchIndexerSkillset", + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs - ) -> "models.Skillset": + ) -> "models.SearchIndexerSkillset": """Creates a new skillset in a search service or updates the skillset if it already exists. :param skillset_name: The name of the skillset to create or update. :type skillset_name: str :param skillset: The skillset containing one or more skills to create or update in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~search_service_client.models.SearchIndexerSkillset + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset or ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerSkillset or ~search_service_client.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" @@ -91,17 +91,17 @@ async def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -115,10 +115,10 @@ async def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -129,18 +129,23 @@ async def create_or_update( async def delete( self, skillset_name: str, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> None: """Deletes a skillset in a search service. :param skillset_name: The name of the skillset to delete. :type skillset_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. 
The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -150,11 +155,6 @@ async def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -175,10 +175,10 @@ async def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -200,7 +200,7 @@ async def get( skillset_name: str, request_options: Optional["models.RequestOptions"] = None, **kwargs - ) -> "models.Skillset": + ) -> "models.SearchIndexerSkillset": """Retrieves a skillset in a search service. :param skillset_name: The name of the skillset to retrieve. @@ -208,11 +208,11 @@ async def get( :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -248,7 +248,7 @@ async def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -322,22 +322,22 @@ async def list( async def create( self, - skillset: "models.Skillset", + skillset: "models.SearchIndexerSkillset", request_options: Optional["models.RequestOptions"] = None, **kwargs - ) -> "models.Skillset": + ) -> "models.SearchIndexerSkillset": """Creates a new skillset in a search service. 
:param skillset: The skillset containing one or more skills to create in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~search_service_client.models.SearchIndexerSkillset :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -365,7 +365,7 @@ async def create( # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -377,7 +377,7 @@ async def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py index 445c1f96f0b4..16294474e20e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py @@ -41,8 +41,9 @@ async def create_or_update( self, synonym_map_name: str, synonym_map: "models.SynonymMap", + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> "models.SynonymMap": """Creates a new synonym map or updates a synonym map if it already exists. @@ -51,10 +52,14 @@ async def create_or_update( :type synonym_map_name: str :param synonym_map: The definition of the synonym map to create or update. :type synonym_map: ~search_service_client.models.SynonymMap + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) :rtype: ~search_service_client.models.SynonymMap or ~search_service_client.models.SynonymMap @@ -64,11 +69,6 @@ async def create_or_update( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" @@ -90,10 +90,10 @@ async def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') @@ -128,18 +128,23 @@ async def create_or_update( async def delete( self, synonym_map_name: str, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> None: """Deletes a synonym map. :param synonym_map_name: The name of the synonym map to delete. :type synonym_map_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -149,11 +154,6 @@ async def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -174,10 +174,10 @@ async def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py index 9e8de4e9d799..50af0a4a97e6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py @@ -5,14 +5,15 @@ # -------------------------------------------------------------------------- try: - from ._models_py3 import AccessCondition from ._models_py3 import AnalyzeRequest from ._models_py3 import AnalyzeResult - from ._models_py3 import Analyzer + from ._models_py3 import AnalyzedTokenInfo from ._models_py3 import AsciiFoldingTokenFilter from ._models_py3 import AzureActiveDirectoryApplicationCredentials + from ._models_py3 import BM25Similarity from ._models_py3 import CharFilter from ._models_py3 import CjkBigramTokenFilter + from ._models_py3 import ClassicSimilarity from ._models_py3 import ClassicTokenizer from ._models_py3 import CognitiveServicesAccount from ._models_py3 import CognitiveServicesAccountKey @@ -21,9 +22,7 @@ from ._models_py3 import CorsOptions from ._models_py3 import CustomAnalyzer from ._models_py3 import DataChangeDetectionPolicy - from ._models_py3 import DataContainer from ._models_py3 import DataDeletionDetectionPolicy - from ._models_py3 import DataSource from ._models_py3 import DataSourceCredentials from ._models_py3 import DefaultCognitiveServicesAccount from ._models_py3 import DictionaryDecompounderTokenFilter @@ -33,9 +32,7 @@ from ._models_py3 import EdgeNGramTokenFilterV2 from ._models_py3 import EdgeNGramTokenizer from ._models_py3 import ElisionTokenFilter - from ._models_py3 import EncryptionKey from ._models_py3 import EntityRecognitionSkill - from ._models_py3 import Field from ._models_py3 import FieldMapping from ._models_py3 import FieldMappingFunction from ._models_py3 import 
FreshnessScoringFunction @@ -43,16 +40,10 @@ from ._models_py3 import GetIndexStatisticsResult from ._models_py3 import HighWaterMarkChangeDetectionPolicy from ._models_py3 import ImageAnalysisSkill - from ._models_py3 import Index - from ._models_py3 import Indexer - from ._models_py3 import IndexerExecutionInfo from ._models_py3 import IndexerExecutionResult - from ._models_py3 import IndexerLimits from ._models_py3 import IndexingParameters from ._models_py3 import IndexingSchedule from ._models_py3 import InputFieldMappingEntry - from ._models_py3 import ItemError - from ._models_py3 import ItemWarning from ._models_py3 import KeepTokenFilter from ._models_py3 import KeyPhraseExtractionSkill from ._models_py3 import KeywordMarkerTokenFilter @@ -60,12 +51,17 @@ from ._models_py3 import KeywordTokenizerV2 from ._models_py3 import LanguageDetectionSkill from ._models_py3 import LengthTokenFilter + from ._models_py3 import LexicalAnalyzer + from ._models_py3 import LexicalTokenizer from ._models_py3 import LimitTokenFilter from ._models_py3 import ListDataSourcesResult from ._models_py3 import ListIndexersResult from ._models_py3 import ListIndexesResult from ._models_py3 import ListSkillsetsResult from ._models_py3 import ListSynonymMapsResult + from ._models_py3 import LuceneStandardAnalyzer + from ._models_py3 import LuceneStandardTokenizer + from ._models_py3 import LuceneStandardTokenizerV2 from ._models_py3 import MagnitudeScoringFunction from ._models_py3 import MagnitudeScoringParameters from ._models_py3 import MappingCharFilter @@ -89,21 +85,29 @@ from ._models_py3 import ScoringFunction from ._models_py3 import ScoringProfile from ._models_py3 import SearchError + from ._models_py3 import SearchField + from ._models_py3 import SearchIndex + from ._models_py3 import SearchIndexer + from ._models_py3 import SearchIndexerDataContainer + from ._models_py3 import SearchIndexerDataSource + from ._models_py3 import SearchIndexerError + from ._models_py3 import SearchIndexerLimits + from ._models_py3 import SearchIndexerSkill + from ._models_py3 import SearchIndexerSkillset + from ._models_py3 import SearchIndexerStatus + from ._models_py3 import SearchIndexerWarning + from ._models_py3 import SearchResourceEncryptionKey from ._models_py3 import SentimentSkill from ._models_py3 import ServiceCounters from ._models_py3 import ServiceLimits from ._models_py3 import ServiceStatistics from ._models_py3 import ShaperSkill from ._models_py3 import ShingleTokenFilter - from ._models_py3 import Skill - from ._models_py3 import Skillset + from ._models_py3 import Similarity from ._models_py3 import SnowballTokenFilter from ._models_py3 import SoftDeleteColumnDeletionDetectionPolicy from ._models_py3 import SplitSkill from ._models_py3 import SqlIntegratedChangeTrackingPolicy - from ._models_py3 import StandardAnalyzer - from ._models_py3 import StandardTokenizer - from ._models_py3 import StandardTokenizerV2 from ._models_py3 import StemmerOverrideTokenFilter from ._models_py3 import StemmerTokenFilter from ._models_py3 import StopAnalyzer @@ -116,22 +120,21 @@ from ._models_py3 import TextTranslationSkill from ._models_py3 import TextWeights from ._models_py3 import TokenFilter - from ._models_py3 import TokenInfo - from ._models_py3 import Tokenizer from ._models_py3 import TruncateTokenFilter from ._models_py3 import UaxUrlEmailTokenizer from ._models_py3 import UniqueTokenFilter from ._models_py3 import WebApiSkill from ._models_py3 import WordDelimiterTokenFilter except (SyntaxError, 
ImportError): - from ._models import AccessCondition # type: ignore from ._models import AnalyzeRequest # type: ignore from ._models import AnalyzeResult # type: ignore - from ._models import Analyzer # type: ignore + from ._models import AnalyzedTokenInfo # type: ignore from ._models import AsciiFoldingTokenFilter # type: ignore from ._models import AzureActiveDirectoryApplicationCredentials # type: ignore + from ._models import BM25Similarity # type: ignore from ._models import CharFilter # type: ignore from ._models import CjkBigramTokenFilter # type: ignore + from ._models import ClassicSimilarity # type: ignore from ._models import ClassicTokenizer # type: ignore from ._models import CognitiveServicesAccount # type: ignore from ._models import CognitiveServicesAccountKey # type: ignore @@ -140,9 +143,7 @@ from ._models import CorsOptions # type: ignore from ._models import CustomAnalyzer # type: ignore from ._models import DataChangeDetectionPolicy # type: ignore - from ._models import DataContainer # type: ignore from ._models import DataDeletionDetectionPolicy # type: ignore - from ._models import DataSource # type: ignore from ._models import DataSourceCredentials # type: ignore from ._models import DefaultCognitiveServicesAccount # type: ignore from ._models import DictionaryDecompounderTokenFilter # type: ignore @@ -152,9 +153,7 @@ from ._models import EdgeNGramTokenFilterV2 # type: ignore from ._models import EdgeNGramTokenizer # type: ignore from ._models import ElisionTokenFilter # type: ignore - from ._models import EncryptionKey # type: ignore from ._models import EntityRecognitionSkill # type: ignore - from ._models import Field # type: ignore from ._models import FieldMapping # type: ignore from ._models import FieldMappingFunction # type: ignore from ._models import FreshnessScoringFunction # type: ignore @@ -162,16 +161,10 @@ from ._models import GetIndexStatisticsResult # type: ignore from ._models import HighWaterMarkChangeDetectionPolicy # type: ignore from ._models import ImageAnalysisSkill # type: ignore - from ._models import Index # type: ignore - from ._models import Indexer # type: ignore - from ._models import IndexerExecutionInfo # type: ignore from ._models import IndexerExecutionResult # type: ignore - from ._models import IndexerLimits # type: ignore from ._models import IndexingParameters # type: ignore from ._models import IndexingSchedule # type: ignore from ._models import InputFieldMappingEntry # type: ignore - from ._models import ItemError # type: ignore - from ._models import ItemWarning # type: ignore from ._models import KeepTokenFilter # type: ignore from ._models import KeyPhraseExtractionSkill # type: ignore from ._models import KeywordMarkerTokenFilter # type: ignore @@ -179,12 +172,17 @@ from ._models import KeywordTokenizerV2 # type: ignore from ._models import LanguageDetectionSkill # type: ignore from ._models import LengthTokenFilter # type: ignore + from ._models import LexicalAnalyzer # type: ignore + from ._models import LexicalTokenizer # type: ignore from ._models import LimitTokenFilter # type: ignore from ._models import ListDataSourcesResult # type: ignore from ._models import ListIndexersResult # type: ignore from ._models import ListIndexesResult # type: ignore from ._models import ListSkillsetsResult # type: ignore from ._models import ListSynonymMapsResult # type: ignore + from ._models import LuceneStandardAnalyzer # type: ignore + from ._models import LuceneStandardTokenizer # type: ignore + from ._models import 
LuceneStandardTokenizerV2 # type: ignore from ._models import MagnitudeScoringFunction # type: ignore from ._models import MagnitudeScoringParameters # type: ignore from ._models import MappingCharFilter # type: ignore @@ -208,21 +206,29 @@ from ._models import ScoringFunction # type: ignore from ._models import ScoringProfile # type: ignore from ._models import SearchError # type: ignore + from ._models import SearchField # type: ignore + from ._models import SearchIndex # type: ignore + from ._models import SearchIndexer # type: ignore + from ._models import SearchIndexerDataContainer # type: ignore + from ._models import SearchIndexerDataSource # type: ignore + from ._models import SearchIndexerError # type: ignore + from ._models import SearchIndexerLimits # type: ignore + from ._models import SearchIndexerSkill # type: ignore + from ._models import SearchIndexerSkillset # type: ignore + from ._models import SearchIndexerStatus # type: ignore + from ._models import SearchIndexerWarning # type: ignore + from ._models import SearchResourceEncryptionKey # type: ignore from ._models import SentimentSkill # type: ignore from ._models import ServiceCounters # type: ignore from ._models import ServiceLimits # type: ignore from ._models import ServiceStatistics # type: ignore from ._models import ShaperSkill # type: ignore from ._models import ShingleTokenFilter # type: ignore - from ._models import Skill # type: ignore - from ._models import Skillset # type: ignore + from ._models import Similarity # type: ignore from ._models import SnowballTokenFilter # type: ignore from ._models import SoftDeleteColumnDeletionDetectionPolicy # type: ignore from ._models import SplitSkill # type: ignore from ._models import SqlIntegratedChangeTrackingPolicy # type: ignore - from ._models import StandardAnalyzer # type: ignore - from ._models import StandardTokenizer # type: ignore - from ._models import StandardTokenizerV2 # type: ignore from ._models import StemmerOverrideTokenFilter # type: ignore from ._models import StemmerTokenFilter # type: ignore from ._models import StopAnalyzer # type: ignore @@ -235,8 +241,6 @@ from ._models import TextTranslationSkill # type: ignore from ._models import TextWeights # type: ignore from ._models import TokenFilter # type: ignore - from ._models import TokenInfo # type: ignore - from ._models import Tokenizer # type: ignore from ._models import TruncateTokenFilter # type: ignore from ._models import UaxUrlEmailTokenizer # type: ignore from ._models import UniqueTokenFilter # type: ignore @@ -244,10 +248,7 @@ from ._models import WordDelimiterTokenFilter # type: ignore from ._search_service_client_enums import ( - AnalyzerName, CjkBigramTokenFilterScripts, - DataSourceType, - DataType, EdgeNGramTokenFilterSide, EntityCategory, EntityRecognitionSkillLanguage, @@ -256,6 +257,8 @@ IndexerExecutionStatus, IndexerStatus, KeyPhraseExtractionSkillLanguage, + LexicalAnalyzerName, + LexicalTokenizerName, MicrosoftStemmingTokenizerLanguage, MicrosoftTokenizerLanguage, OcrSkillLanguage, @@ -263,6 +266,8 @@ RegexFlags, ScoringFunctionAggregation, ScoringFunctionInterpolation, + SearchFieldDataType, + SearchIndexerDataSourceType, SentimentSkillLanguage, SnowballTokenFilterLanguage, SplitSkillLanguage, @@ -273,19 +278,19 @@ TextTranslationSkillLanguage, TokenCharacterKind, TokenFilterName, - TokenizerName, VisualFeature, ) __all__ = [ - 'AccessCondition', 'AnalyzeRequest', 'AnalyzeResult', - 'Analyzer', + 'AnalyzedTokenInfo', 'AsciiFoldingTokenFilter', 
'AzureActiveDirectoryApplicationCredentials', + 'BM25Similarity', 'CharFilter', 'CjkBigramTokenFilter', + 'ClassicSimilarity', 'ClassicTokenizer', 'CognitiveServicesAccount', 'CognitiveServicesAccountKey', @@ -294,9 +299,7 @@ 'CorsOptions', 'CustomAnalyzer', 'DataChangeDetectionPolicy', - 'DataContainer', 'DataDeletionDetectionPolicy', - 'DataSource', 'DataSourceCredentials', 'DefaultCognitiveServicesAccount', 'DictionaryDecompounderTokenFilter', @@ -306,9 +309,7 @@ 'EdgeNGramTokenFilterV2', 'EdgeNGramTokenizer', 'ElisionTokenFilter', - 'EncryptionKey', 'EntityRecognitionSkill', - 'Field', 'FieldMapping', 'FieldMappingFunction', 'FreshnessScoringFunction', @@ -316,16 +317,10 @@ 'GetIndexStatisticsResult', 'HighWaterMarkChangeDetectionPolicy', 'ImageAnalysisSkill', - 'Index', - 'Indexer', - 'IndexerExecutionInfo', 'IndexerExecutionResult', - 'IndexerLimits', 'IndexingParameters', 'IndexingSchedule', 'InputFieldMappingEntry', - 'ItemError', - 'ItemWarning', 'KeepTokenFilter', 'KeyPhraseExtractionSkill', 'KeywordMarkerTokenFilter', @@ -333,12 +328,17 @@ 'KeywordTokenizerV2', 'LanguageDetectionSkill', 'LengthTokenFilter', + 'LexicalAnalyzer', + 'LexicalTokenizer', 'LimitTokenFilter', 'ListDataSourcesResult', 'ListIndexersResult', 'ListIndexesResult', 'ListSkillsetsResult', 'ListSynonymMapsResult', + 'LuceneStandardAnalyzer', + 'LuceneStandardTokenizer', + 'LuceneStandardTokenizerV2', 'MagnitudeScoringFunction', 'MagnitudeScoringParameters', 'MappingCharFilter', @@ -362,21 +362,29 @@ 'ScoringFunction', 'ScoringProfile', 'SearchError', + 'SearchField', + 'SearchIndex', + 'SearchIndexer', + 'SearchIndexerDataContainer', + 'SearchIndexerDataSource', + 'SearchIndexerError', + 'SearchIndexerLimits', + 'SearchIndexerSkill', + 'SearchIndexerSkillset', + 'SearchIndexerStatus', + 'SearchIndexerWarning', + 'SearchResourceEncryptionKey', 'SentimentSkill', 'ServiceCounters', 'ServiceLimits', 'ServiceStatistics', 'ShaperSkill', 'ShingleTokenFilter', - 'Skill', - 'Skillset', + 'Similarity', 'SnowballTokenFilter', 'SoftDeleteColumnDeletionDetectionPolicy', 'SplitSkill', 'SqlIntegratedChangeTrackingPolicy', - 'StandardAnalyzer', - 'StandardTokenizer', - 'StandardTokenizerV2', 'StemmerOverrideTokenFilter', 'StemmerTokenFilter', 'StopAnalyzer', @@ -389,17 +397,12 @@ 'TextTranslationSkill', 'TextWeights', 'TokenFilter', - 'TokenInfo', - 'Tokenizer', 'TruncateTokenFilter', 'UaxUrlEmailTokenizer', 'UniqueTokenFilter', 'WebApiSkill', 'WordDelimiterTokenFilter', - 'AnalyzerName', 'CjkBigramTokenFilterScripts', - 'DataSourceType', - 'DataType', 'EdgeNGramTokenFilterSide', 'EntityCategory', 'EntityRecognitionSkillLanguage', @@ -408,6 +411,8 @@ 'IndexerExecutionStatus', 'IndexerStatus', 'KeyPhraseExtractionSkillLanguage', + 'LexicalAnalyzerName', + 'LexicalTokenizerName', 'MicrosoftStemmingTokenizerLanguage', 'MicrosoftTokenizerLanguage', 'OcrSkillLanguage', @@ -415,6 +420,8 @@ 'RegexFlags', 'ScoringFunctionAggregation', 'ScoringFunctionInterpolation', + 'SearchFieldDataType', + 'SearchIndexerDataSourceType', 'SentimentSkillLanguage', 'SnowballTokenFilterLanguage', 'SplitSkillLanguage', @@ -425,6 +432,5 @@ 'TextTranslationSkillLanguage', 'TokenCharacterKind', 'TokenFilterName', - 'TokenizerName', 'VisualFeature', ] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py index 4474329fee6d..bdb6ee05665b 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py @@ -8,69 +8,49 @@ import msrest.serialization -class AccessCondition(msrest.serialization.Model): - """Parameter group. - - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_match': {'key': 'If-Match', 'type': 'str'}, - 'if_none_match': {'key': 'If-None-Match', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessCondition, self).__init__(**kwargs) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class Analyzer(msrest.serialization.Model): - """Base type for analyzers. +class AnalyzedTokenInfo(msrest.serialization.Model): + """Information about a token returned by an analyzer. - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CustomAnalyzer, PatternAnalyzer, StandardAnalyzer, StopAnalyzer. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str + :ivar token: Required. The token returned by the analyzer. + :vartype token: str + :ivar start_offset: Required. The index of the first character of the token in the input text. + :vartype start_offset: int + :ivar end_offset: Required. The index of the last character of the token in the input text. + :vartype end_offset: int + :ivar position: Required. The position of the token in the input text relative to other tokens. + The first token in the input text has position 0, the next has position 1, and so on. Depending + on the analyzer used, some tokens might have the same position, for example if they are + synonyms of each other. 
+ :vartype position: int """ _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, + 'token': {'required': True, 'readonly': True}, + 'start_offset': {'required': True, 'readonly': True}, + 'end_offset': {'required': True, 'readonly': True}, + 'position': {'required': True, 'readonly': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'StandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + 'token': {'key': 'token', 'type': 'str'}, + 'start_offset': {'key': 'startOffset', 'type': 'int'}, + 'end_offset': {'key': 'endOffset', 'type': 'int'}, + 'position': {'key': 'position', 'type': 'int'}, } def __init__( self, **kwargs ): - super(Analyzer, self).__init__(**kwargs) - self.odata_type = None - self.name = kwargs.get('name', None) + super(AnalyzedTokenInfo, self).__init__(**kwargs) + self.token = None + self.start_offset = None + self.end_offset = None + self.position = None class AnalyzeRequest(msrest.serialization.Model): @@ -99,13 +79,13 @@ class AnalyzeRequest(msrest.serialization.Model): 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName + :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName :param tokenizer: The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. Possible values include: 'classic', 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.TokenizerName + :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName :param token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. :type token_filters: list[str or ~search_service_client.models.TokenFilterName] @@ -144,7 +124,7 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param tokens: Required. The list of tokens returned by the analyzer specified in the request. - :type tokens: list[~search_service_client.models.TokenInfo] + :type tokens: list[~search_service_client.models.AnalyzedTokenInfo] """ _validation = { @@ -152,7 +132,7 @@ class AnalyzeResult(msrest.serialization.Model): } _attribute_map = { - 'tokens': {'key': 'tokens', 'type': '[TokenInfo]'}, + 'tokens': {'key': 'tokens', 'type': '[AnalyzedTokenInfo]'}, } def __init__( @@ -271,6 +251,75 @@ def __init__( self.application_secret = kwargs.get('application_secret', None) +class Similarity(msrest.serialization.Model): + """Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. 
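For the analyze API touched above, analyzer and tokenizer are mutually exclusive ways to drive tokenization. A request-construction sketch, assuming the generated models are importable from this package's internal models module (an assumption about layout, which may differ in the public surface):

from azure.search.documents._service._generated.models import AnalyzeRequest

# "standard.lucene" is one of the LexicalAnalyzerName values listed above;
# because analyzer is set, the tokenizer (and its token_filters) must be
# left unset, per the mutual-exclusion rule in the docstring.
request = AnalyzeRequest(text="The quick brown fox", analyzer="standard.lucene")
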
+ + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: BM25Similarity, ClassicSimilarity. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.BM25Similarity': 'BM25Similarity', '#Microsoft.Azure.Search.ClassicSimilarity': 'ClassicSimilarity'} + } + + def __init__( + self, + **kwargs + ): + super(Similarity, self).__init__(**kwargs) + self.odata_type = None + + +class BM25Similarity(Similarity): + """Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param k1: This property controls the scaling function between the term frequency of each + matching terms and the final relevance score of a document-query pair. By default, a value of + 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. + :type k1: float + :param b: This property controls how the length of a document affects the relevance score. By + default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, + while a value of 1.0 means the score is fully normalized by the length of the document. + :type b: float + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'k1': {'key': 'k1', 'type': 'float'}, + 'b': {'key': 'b', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + super(BM25Similarity, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' + self.k1 = kwargs.get('k1', None) + self.b = kwargs.get('b', None) + + class CharFilter(msrest.serialization.Model): """Base type for character filters. @@ -312,7 +361,7 @@ def __init__( class CjkBigramTokenFilter(TokenFilter): - """Forms bigrams of CJK terms that are generated from StandardTokenizer. This token filter is implemented using Apache Lucene. + """Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -352,11 +401,36 @@ def __init__( self.output_unigrams = kwargs.get('output_unigrams', False) -class Tokenizer(msrest.serialization.Model): +class ClassicSimilarity(Similarity): + """Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. 
+ :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ClassicSimilarity, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' + + +class LexicalTokenizer(msrest.serialization.Model): """Base type for tokenizers. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, StandardTokenizer, StandardTokenizerV2, UaxUrlEmailTokenizer. + sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, UaxUrlEmailTokenizer. All required parameters must be populated in order to send to Azure. @@ -380,19 +454,19 @@ class Tokenizer(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'StandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'StandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} + 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'LuceneStandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'LuceneStandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} } def __init__( self, **kwargs ): - super(Tokenizer, self).__init__(**kwargs) + super(LexicalTokenizer, self).__init__(**kwargs) self.odata_type = None self.name = kwargs.get('name', None) -class ClassicTokenizer(Tokenizer): +class ClassicTokenizer(LexicalTokenizer): """Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. 
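For illustration, a minimal sketch (not part of the generated diff) of how the new polymorphic similarity models behave once this patch is applied. It assumes Similarity, BM25Similarity, and ClassicSimilarity are re-exported from the generated models package, as the regenerated models/__init__.py in this patch suggests; the payload dict is an illustrative value.

    # Sketch only: the import path follows this patch's package layout.
    from azure.search.documents._service._generated.models import (
        BM25Similarity,
        ClassicSimilarity,
        Similarity,
    )

    # Each subclass pins the '@odata.type' discriminator in __init__; k1 and b
    # are the BM25 tuning knobs documented above (service defaults 1.2 / 0.75).
    sim = BM25Similarity(k1=1.4, b=0.9)
    assert sim.odata_type == '#Microsoft.Azure.Search.BM25Similarity'

    # msrest should route deserialization through Similarity._subtype_map and
    # hand back the concrete subclass for a given discriminator value.
    payload = {'@odata.type': '#Microsoft.Azure.Search.ClassicSimilarity'}
    assert isinstance(Similarity.deserialize(payload), ClassicSimilarity)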
@@ -550,7 +624,7 @@ def __init__( self.use_query_mode = kwargs.get('use_query_mode', False) -class Skill(msrest.serialization.Model): +class SearchIndexerSkill(msrest.serialization.Model): """Base type for skills. You probably want to use the sub-classes and not this class directly. Known @@ -602,7 +676,7 @@ def __init__( self, **kwargs ): - super(Skill, self).__init__(**kwargs) + super(SearchIndexerSkill, self).__init__(**kwargs) self.odata_type = None self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) @@ -611,7 +685,7 @@ def __init__( self.outputs = kwargs.get('outputs', None) -class ConditionalSkill(Skill): +class ConditionalSkill(SearchIndexerSkill): """A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. All required parameters must be populated in order to send to Azure. @@ -692,7 +766,47 @@ def __init__( self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) -class CustomAnalyzer(Analyzer): +class LexicalAnalyzer(msrest.serialization.Model): + """Base type for analyzers. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CustomAnalyzer, PatternAnalyzer, LuceneStandardAnalyzer, StopAnalyzer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'LuceneStandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + } + + def __init__( + self, + **kwargs + ): + super(LexicalAnalyzer, self).__init__(**kwargs) + self.odata_type = None + self.name = kwargs.get('name', None) + + +class CustomAnalyzer(LexicalAnalyzer): """Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. All required parameters must be populated in order to send to Azure. @@ -709,7 +823,7 @@ class CustomAnalyzer(Analyzer): 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.TokenizerName + :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName :param token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. 
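A second hedged sketch (again not part of the diff) showing the renamed analyzer hierarchy in use: CustomAnalyzer now derives from LexicalAnalyzer, and its tokenizer and token_filters values are LexicalTokenizerName / TokenFilterName strings. The analyzer name below is an illustrative value, and the import path again assumes this patch's generated package layout.

    from azure.search.documents._service._generated.models import CustomAnalyzer

    analyzer = CustomAnalyzer(
        name='my_custom_analyzer',    # illustrative name, not from the patch
        tokenizer='standard_v2',      # a LexicalTokenizerName value
        token_filters=['lowercase'],  # applied in the order listed
    )
    # __init__ fixes the discriminator, so the serialized body should carry
    # '@odata.type': '#Microsoft.Azure.Search.CustomAnalyzer'.
    body = analyzer.serialize()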
@@ -778,37 +892,6 @@ def __init__( self.odata_type = None -class DataContainer(msrest.serialization.Model): - """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the table or view (for Azure SQL data source) or collection - (for CosmosDB data source) that will be indexed. - :type name: str - :param query: A query that is applied to this data container. The syntax and meaning of this - parameter is datasource-specific. Not supported by Azure SQL datasources. - :type query: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'query': {'key': 'query', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DataContainer, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.query = kwargs.get('query', None) - - class DataDeletionDetectionPolicy(msrest.serialization.Model): """Base type for data deletion detection policies. @@ -842,63 +925,6 @@ def __init__( self.odata_type = None -class DataSource(msrest.serialization.Model): - """Represents a datasource definition, which can be used to configure an indexer. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the datasource. - :type name: str - :param description: The description of the datasource. - :type description: str - :param type: Required. The type of the datasource. Possible values include: 'azuresql', - 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. - :type type: str or ~search_service_client.models.DataSourceType - :param credentials: Required. Credentials for the datasource. - :type credentials: ~search_service_client.models.DataSourceCredentials - :param container: Required. The data container for the datasource. - :type container: ~search_service_client.models.DataContainer - :param data_change_detection_policy: The data change detection policy for the datasource. - :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy - :param data_deletion_detection_policy: The data deletion detection policy for the datasource. - :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy - :param e_tag: The ETag of the DataSource. 
- :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - 'credentials': {'required': True}, - 'container': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, - 'container': {'key': 'container', 'type': 'DataContainer'}, - 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, - 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DataSource, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.description = kwargs.get('description', None) - self.type = kwargs.get('type', None) - self.credentials = kwargs.get('credentials', None) - self.container = kwargs.get('container', None) - self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None) - self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None) - self.e_tag = kwargs.get('e_tag', None) - - class DataSourceCredentials(msrest.serialization.Model): """Represents credentials that can be used to connect to a datasource. @@ -1228,7 +1254,7 @@ def __init__( self.side = kwargs.get('side', None) -class EdgeNGramTokenizer(Tokenizer): +class EdgeNGramTokenizer(LexicalTokenizer): """Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -1311,53 +1337,8 @@ def __init__( self.articles = kwargs.get('articles', None) -class EncryptionKey(msrest.serialization.Model): - """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. - - All required parameters must be populated in order to send to Azure. - - :param key_vault_key_name: Required. The name of your Azure Key Vault key to be used to encrypt - your data at rest. - :type key_vault_key_name: str - :param key_vault_key_version: Required. The version of your Azure Key Vault key to be used to - encrypt your data at rest. - :type key_vault_key_version: str - :param key_vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, - that contains the key to be used to encrypt your data at rest. An example URI might be - https://my-keyvault-name.vault.azure.net. - :type key_vault_uri: str - :param access_credentials: Optional Azure Active Directory credentials used for accessing your - Azure Key Vault. Not required if using managed identity instead. 
- :type access_credentials: - ~search_service_client.models.AzureActiveDirectoryApplicationCredentials - """ - - _validation = { - 'key_vault_key_name': {'required': True}, - 'key_vault_key_version': {'required': True}, - 'key_vault_uri': {'required': True}, - } - - _attribute_map = { - 'key_vault_key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, - 'key_vault_key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, - 'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, - 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, - } - - def __init__( - self, - **kwargs - ): - super(EncryptionKey, self).__init__(**kwargs) - self.key_vault_key_name = kwargs.get('key_vault_key_name', None) - self.key_vault_key_version = kwargs.get('key_vault_key_version', None) - self.key_vault_uri = kwargs.get('key_vault_uri', None) - self.access_credentials = kwargs.get('access_credentials', None) - - -class EntityRecognitionSkill(Skill): - """Text analytics entity recognition. +class EntityRecognitionSkill(SearchIndexerSkill): + """Text analytics entity recognition. All required parameters must be populated in order to send to Azure. @@ -1429,184 +1410,6 @@ def __init__( self.minimum_precision = kwargs.get('minimum_precision', None) -class Field(msrest.serialization.Model): - """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the field, which must be unique within the fields collection - of the index or parent field. - :type name: str - :param type: Required. The data type of the field. Possible values include: 'Edm.String', - 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', - 'Edm.GeographyPoint', 'Edm.ComplexType'. - :type type: str or ~search_service_client.models.DataType - :param key: A value indicating whether the field uniquely identifies documents in the index. - Exactly one top-level field in each index must be chosen as the key field and it must be of - type Edm.String. Key fields can be used to look up documents directly and update or delete - specific documents. Default is false for simple fields and null for complex fields. - :type key: bool - :param retrievable: A value indicating whether the field can be returned in a search result. - You can disable this option if you want to use a field (for example, margin) as a filter, - sorting, or scoring mechanism but do not want the field to be visible to the end user. This - property must be true for key fields, and it must be null for complex fields. This property can - be changed on existing fields. Enabling this property does not cause any increase in index - storage requirements. Default is true for simple fields and null for complex fields. - :type retrievable: bool - :param searchable: A value indicating whether the field is full-text searchable. This means it - will undergo analysis such as word-breaking during indexing. If you set a searchable field to a - value like "sunny day", internally it will be split into the individual tokens "sunny" and - "day". This enables full-text searches for these terms. Fields of type Edm.String or - Collection(Edm.String) are searchable by default. This property must be false for simple fields - of other non-string data types, and it must be null for complex fields. 
Note: searchable fields - consume extra space in your index since Azure Cognitive Search will store an additional - tokenized version of the field value for full-text searches. If you want to save space in your - index and you don't need a field to be included in searches, set searchable to false. - :type searchable: bool - :param filterable: A value indicating whether to enable the field to be referenced in $filter - queries. filterable differs from searchable in how strings are handled. Fields of type - Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so - comparisons are for exact matches only. For example, if you set such a field f to "sunny day", - $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property - must be null for complex fields. Default is true for simple fields and null for complex fields. - :type filterable: bool - :param sortable: A value indicating whether to enable the field to be referenced in $orderby - expressions. By default Azure Cognitive Search sorts results by score, but in many experiences - users will want to sort by fields in the documents. A simple field can be sortable only if it - is single-valued (it has a single value in the scope of the parent document). Simple collection - fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex - collections are also multi-valued, and therefore cannot be sortable. This is true whether it's - an immediate parent field, or an ancestor field, that's the complex collection. Complex fields - cannot be sortable and the sortable property must be null for such fields. The default for - sortable is true for single-valued simple fields, false for multi-valued simple fields, and - null for complex fields. - :type sortable: bool - :param facetable: A value indicating whether to enable the field to be referenced in facet - queries. Typically used in a presentation of search results that includes hit count by category - (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so - on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or - Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - fields. - :type facetable: bool - :param analyzer: The name of the analyzer to use for the field. This option can be used only - with searchable fields and it can't be set together with either searchAnalyzer or - indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. 
Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName - :param search_analyzer: The name of the analyzer used at search time for the field. This option - can be used only with searchable fields. It must be set together with indexAnalyzer and it - cannot be set together with the analyzer option. This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. This - analyzer can be updated on an existing field. Must be null for complex fields. Possible values - include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', - 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- - Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', - 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', - 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', - 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', - 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', - 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', - 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', - 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', - 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', - 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. 
- :type search_analyzer: str or ~search_service_client.models.AnalyzerName - :param index_analyzer: The name of the analyzer used at indexing time for the field. This - option can be used only with searchable fields. It must be set together with searchAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. Once - the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. - Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', - 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- - Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', - 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', - 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', - 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', - 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', - 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', - 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', - 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', - 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', - 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. - :type index_analyzer: str or ~search_service_client.models.AnalyzerName - :param synonym_maps: A list of the names of synonym maps to associate with this field. This - option can be used only with searchable fields. Currently only one synonym map per field is - supported. Assigning a synonym map to a field ensures that query terms targeting that field are - expanded at query-time using the rules in the synonym map. This attribute can be changed on - existing fields. Must be null or an empty collection for complex fields. - :type synonym_maps: list[str] - :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or - Collection(Edm.ComplexType). Must be null or empty for simple fields. 
- :type fields: list[~search_service_client.models.Field] - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'key': {'key': 'key', 'type': 'bool'}, - 'retrievable': {'key': 'retrievable', 'type': 'bool'}, - 'searchable': {'key': 'searchable', 'type': 'bool'}, - 'filterable': {'key': 'filterable', 'type': 'bool'}, - 'sortable': {'key': 'sortable', 'type': 'bool'}, - 'facetable': {'key': 'facetable', 'type': 'bool'}, - 'analyzer': {'key': 'analyzer', 'type': 'str'}, - 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, - 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, - 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - } - - def __init__( - self, - **kwargs - ): - super(Field, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.key = kwargs.get('key', None) - self.retrievable = kwargs.get('retrievable', None) - self.searchable = kwargs.get('searchable', None) - self.filterable = kwargs.get('filterable', None) - self.sortable = kwargs.get('sortable', None) - self.facetable = kwargs.get('facetable', None) - self.analyzer = kwargs.get('analyzer', None) - self.search_analyzer = kwargs.get('search_analyzer', None) - self.index_analyzer = kwargs.get('index_analyzer', None) - self.synonym_maps = kwargs.get('synonym_maps', None) - self.fields = kwargs.get('fields', None) - - class FieldMapping(msrest.serialization.Model): """Defines a mapping between a field in a data source and a target field in an index. @@ -1805,7 +1608,7 @@ def __init__( self.high_water_mark_column_name = kwargs.get('high_water_mark_column_name', None) -class ImageAnalysisSkill(Skill): +class ImageAnalysisSkill(SearchIndexerSkill): """A skill that analyzes image files. It extracts a rich set of visual features based on the image content. All required parameters must be populated in order to send to Azure. @@ -1867,243 +1670,50 @@ def __init__( self.details = kwargs.get('details', None) -class Index(msrest.serialization.Model): - """Represents a search index definition, which describes the fields and search behavior of an index. +class IndexerExecutionResult(msrest.serialization.Model): + """Represents the result of an individual indexer execution. + + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the index. - :type name: str - :param fields: Required. The fields of the index. - :type fields: list[~search_service_client.models.Field] - :param scoring_profiles: The scoring profiles for the index. - :type scoring_profiles: list[~search_service_client.models.ScoringProfile] - :param default_scoring_profile: The name of the scoring profile to use if none is specified in - the query. If this property is not set and no scoring profile is specified in the query, then - default scoring (tf-idf) will be used. - :type default_scoring_profile: str - :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :type cors_options: ~search_service_client.models.CorsOptions - :param suggesters: The suggesters for the index. - :type suggesters: list[~search_service_client.models.Suggester] - :param analyzers: The analyzers for the index. 
- :type analyzers: list[~search_service_client.models.Analyzer] - :param tokenizers: The tokenizers for the index. - :type tokenizers: list[~search_service_client.models.Tokenizer] - :param token_filters: The token filters for the index. - :type token_filters: list[~search_service_client.models.TokenFilter] - :param char_filters: The character filters for the index. - :type char_filters: list[~search_service_client.models.CharFilter] - :param encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive - Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive - Search will ignore attempts to set this property to null. You can change this property as - needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey - :param e_tag: The ETag of the index. - :type e_tag: str + :ivar status: Required. The outcome of this indexer execution. Possible values include: + 'transientFailure', 'success', 'inProgress', 'reset'. + :vartype status: str or ~search_service_client.models.IndexerExecutionStatus + :ivar error_message: The error message indicating the top-level error, if any. + :vartype error_message: str + :ivar start_time: The start time of this indexer execution. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of this indexer execution, if the execution has already completed. + :vartype end_time: ~datetime.datetime + :ivar errors: Required. The item-level indexing errors. + :vartype errors: list[~search_service_client.models.SearchIndexerError] + :ivar warnings: Required. The item-level indexing warnings. + :vartype warnings: list[~search_service_client.models.SearchIndexerWarning] + :ivar item_count: Required. The number of items that were processed during this indexer + execution. This includes both successfully processed items and items where indexing was + attempted but failed. + :vartype item_count: int + :ivar failed_item_count: Required. The number of items that failed to be indexed during this + indexer execution. + :vartype failed_item_count: int + :ivar initial_tracking_state: Change tracking state with which an indexer execution started. + :vartype initial_tracking_state: str + :ivar final_tracking_state: Change tracking state with which an indexer execution finished. 
+ :vartype final_tracking_state: str """ _validation = { - 'name': {'required': True}, - 'fields': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, - 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, - 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, - 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, - 'analyzers': {'key': 'analyzers', 'type': '[Analyzer]'}, - 'tokenizers': {'key': 'tokenizers', 'type': '[Tokenizer]'}, - 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, - 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Index, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.fields = kwargs.get('fields', None) - self.scoring_profiles = kwargs.get('scoring_profiles', None) - self.default_scoring_profile = kwargs.get('default_scoring_profile', None) - self.cors_options = kwargs.get('cors_options', None) - self.suggesters = kwargs.get('suggesters', None) - self.analyzers = kwargs.get('analyzers', None) - self.tokenizers = kwargs.get('tokenizers', None) - self.token_filters = kwargs.get('token_filters', None) - self.char_filters = kwargs.get('char_filters', None) - self.encryption_key = kwargs.get('encryption_key', None) - self.e_tag = kwargs.get('e_tag', None) - - -class Indexer(msrest.serialization.Model): - """Represents an indexer. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the indexer. - :type name: str - :param description: The description of the indexer. - :type description: str - :param data_source_name: Required. The name of the datasource from which this indexer reads - data. - :type data_source_name: str - :param skillset_name: The name of the skillset executing with this indexer. - :type skillset_name: str - :param target_index_name: Required. The name of the index to which this indexer writes data. - :type target_index_name: str - :param schedule: The schedule for this indexer. - :type schedule: ~search_service_client.models.IndexingSchedule - :param parameters: Parameters for indexer execution. - :type parameters: ~search_service_client.models.IndexingParameters - :param field_mappings: Defines mappings between fields in the data source and corresponding - target fields in the index. - :type field_mappings: list[~search_service_client.models.FieldMapping] - :param output_field_mappings: Output field mappings are applied after enrichment and - immediately before indexing. - :type output_field_mappings: list[~search_service_client.models.FieldMapping] - :param is_disabled: A value indicating whether the indexer is disabled. Default is false. - :type is_disabled: bool - :param e_tag: The ETag of the Indexer. 
- :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'data_source_name': {'required': True}, - 'target_index_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, - 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, - 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, - 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, - 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, - 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, - 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, - 'is_disabled': {'key': 'disabled', 'type': 'bool'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Indexer, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.description = kwargs.get('description', None) - self.data_source_name = kwargs.get('data_source_name', None) - self.skillset_name = kwargs.get('skillset_name', None) - self.target_index_name = kwargs.get('target_index_name', None) - self.schedule = kwargs.get('schedule', None) - self.parameters = kwargs.get('parameters', None) - self.field_mappings = kwargs.get('field_mappings', None) - self.output_field_mappings = kwargs.get('output_field_mappings', None) - self.is_disabled = kwargs.get('is_disabled', False) - self.e_tag = kwargs.get('e_tag', None) - - -class IndexerExecutionInfo(msrest.serialization.Model): - """Represents the current status and execution history of an indexer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', - 'running'. - :vartype status: str or ~search_service_client.models.IndexerStatus - :ivar last_result: The result of the most recent or an in-progress indexer execution. - :vartype last_result: ~search_service_client.models.IndexerExecutionResult - :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse - chronological order. - :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] - :ivar limits: Required. The execution limits for the indexer. - :vartype limits: ~search_service_client.models.IndexerLimits - """ - - _validation = { - 'status': {'required': True, 'readonly': True}, - 'last_result': {'readonly': True}, - 'execution_history': {'required': True, 'readonly': True}, - 'limits': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'str'}, - 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, - 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, - 'limits': {'key': 'limits', 'type': 'IndexerLimits'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerExecutionInfo, self).__init__(**kwargs) - self.status = None - self.last_result = None - self.execution_history = None - self.limits = None - - -class IndexerExecutionResult(msrest.serialization.Model): - """Represents the result of an individual indexer execution. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. 
- - :ivar status: Required. The outcome of this indexer execution. Possible values include: - 'transientFailure', 'success', 'inProgress', 'reset'. - :vartype status: str or ~search_service_client.models.IndexerExecutionStatus - :ivar error_message: The error message indicating the top-level error, if any. - :vartype error_message: str - :ivar start_time: The start time of this indexer execution. - :vartype start_time: ~datetime.datetime - :ivar end_time: The end time of this indexer execution, if the execution has already completed. - :vartype end_time: ~datetime.datetime - :ivar errors: Required. The item-level indexing errors. - :vartype errors: list[~search_service_client.models.ItemError] - :ivar warnings: Required. The item-level indexing warnings. - :vartype warnings: list[~search_service_client.models.ItemWarning] - :ivar item_count: Required. The number of items that were processed during this indexer - execution. This includes both successfully processed items and items where indexing was - attempted but failed. - :vartype item_count: int - :ivar failed_item_count: Required. The number of items that failed to be indexed during this - indexer execution. - :vartype failed_item_count: int - :ivar initial_tracking_state: Change tracking state with which an indexer execution started. - :vartype initial_tracking_state: str - :ivar final_tracking_state: Change tracking state with which an indexer execution finished. - :vartype final_tracking_state: str - """ - - _validation = { - 'status': {'required': True, 'readonly': True}, - 'error_message': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'errors': {'required': True, 'readonly': True}, - 'warnings': {'required': True, 'readonly': True}, - 'item_count': {'required': True, 'readonly': True}, - 'failed_item_count': {'required': True, 'readonly': True}, - 'initial_tracking_state': {'readonly': True}, - 'final_tracking_state': {'readonly': True}, + 'status': {'required': True, 'readonly': True}, + 'error_message': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'errors': {'required': True, 'readonly': True}, + 'warnings': {'required': True, 'readonly': True}, + 'item_count': {'required': True, 'readonly': True}, + 'failed_item_count': {'required': True, 'readonly': True}, + 'initial_tracking_state': {'readonly': True}, + 'final_tracking_state': {'readonly': True}, } _attribute_map = { @@ -2111,8 +1721,8 @@ class IndexerExecutionResult(msrest.serialization.Model): 'error_message': {'key': 'errorMessage', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'errors': {'key': 'errors', 'type': '[ItemError]'}, - 'warnings': {'key': 'warnings', 'type': '[ItemWarning]'}, + 'errors': {'key': 'errors', 'type': '[SearchIndexerError]'}, + 'warnings': {'key': 'warnings', 'type': '[SearchIndexerWarning]'}, 'item_count': {'key': 'itemsProcessed', 'type': 'int'}, 'failed_item_count': {'key': 'itemsFailed', 'type': 'int'}, 'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'}, @@ -2136,44 +1746,6 @@ def __init__( self.final_tracking_state = None -class IndexerLimits(msrest.serialization.Model): - """IndexerLimits. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar max_run_time: The maximum duration that the indexer is permitted to run for one - execution. 
- :vartype max_run_time: ~datetime.timedelta - :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be - considered valid for indexing. - :vartype max_document_extraction_size: long - :ivar max_document_content_characters_to_extract: The maximum number of characters that will be - extracted from a document picked up for indexing. - :vartype max_document_content_characters_to_extract: long - """ - - _validation = { - 'max_run_time': {'readonly': True}, - 'max_document_extraction_size': {'readonly': True}, - 'max_document_content_characters_to_extract': {'readonly': True}, - } - - _attribute_map = { - 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, - 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, - 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerLimits, self).__init__(**kwargs) - self.max_run_time = None - self.max_document_extraction_size = None - self.max_document_content_characters_to_extract = None - - class IndexingParameters(msrest.serialization.Model): """Represents parameters for indexer execution. @@ -2275,143 +1847,33 @@ def __init__( self.inputs = kwargs.get('inputs', None) -class ItemError(msrest.serialization.Model): - """Represents an item- or document-level indexing error. - - Variables are only populated by the server, and will be ignored when sending a request. +class KeepTokenFilter(TokenFilter): + """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :ivar key: The key of the item for which indexing failed. - :vartype key: str - :ivar error_message: Required. The message describing the error that occurred while processing - the item. - :vartype error_message: str - :ivar status_code: Required. The status code indicating why the indexing operation failed. - Possible values include: 400 for a malformed input document, 404 for document not found, 409 - for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the - service is too busy. - :vartype status_code: int - :ivar name: The name of the source at which the error originated. For example, this could refer - to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the error to assist in debugging the indexer. - This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This - may not be always available. - :vartype documentation_link: str + :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled + by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param keep_words: Required. The list of words to keep. + :type keep_words: list[str] + :param lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. 
+ :type lower_case_keep_words: bool """ _validation = { - 'key': {'readonly': True}, - 'error_message': {'required': True, 'readonly': True}, - 'status_code': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'keep_words': {'required': True}, } _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - 'status_code': {'key': 'statusCode', 'type': 'int'}, - 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ItemError, self).__init__(**kwargs) - self.key = None - self.error_message = None - self.status_code = None - self.name = None - self.details = None - self.documentation_link = None - - -class ItemWarning(msrest.serialization.Model): - """Represents an item-level warning. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar key: The key of the item which generated a warning. - :vartype key: str - :ivar message: Required. The message describing the warning that occurred while processing the - item. - :vartype message: str - :ivar name: The name of the source at which the warning originated. For example, this could - refer to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the warning to assist in debugging the - indexer. This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This - may not be always available. - :vartype documentation_link: str - """ - - _validation = { - 'key': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, - } - - _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ItemWarning, self).__init__(**kwargs) - self.key = None - self.message = None - self.name = None - self.details = None - self.documentation_link = None - - -class KeepTokenFilter(TokenFilter): - """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :type odata_type: str - :param name: Required. The name of the token filter. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :type name: str - :param keep_words: Required. The list of words to keep. - :type keep_words: list[str] - :param lower_case_keep_words: A value indicating whether to lower case all words first. Default - is false. 
- :type lower_case_keep_words: bool - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'keep_words': {'required': True}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'keep_words': {'key': 'keepWords', 'type': '[str]'}, 'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'}, @@ -2427,7 +1889,7 @@ def __init__( self.lower_case_keep_words = kwargs.get('lower_case_keep_words', False) -class KeyPhraseExtractionSkill(Skill): +class KeyPhraseExtractionSkill(SearchIndexerSkill): """A skill that uses text analytics for key phrase extraction. All required parameters must be populated in order to send to Azure. @@ -2530,7 +1992,7 @@ def __init__( self.ignore_case = kwargs.get('ignore_case', False) -class KeywordTokenizer(Tokenizer): +class KeywordTokenizer(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2566,7 +2028,7 @@ def __init__( self.buffer_size = kwargs.get('buffer_size', 256) -class KeywordTokenizerV2(Tokenizer): +class KeywordTokenizerV2(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2604,7 +2066,7 @@ def __init__( self.max_token_length = kwargs.get('max_token_length', 256) -class LanguageDetectionSkill(Skill): +class LanguageDetectionSkill(SearchIndexerSkill): """A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. All required parameters must be populated in order to send to Azure. @@ -2665,25 +2127,25 @@ class LengthTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param min: The minimum length in characters. Default is 0. Maximum is 300. Must be less than - the value of max. - :type min: int - :param max: The maximum length in characters. Default and maximum is 300. - :type max: int + :param min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max. + :type min_length: int + :param max_length: The maximum length in characters. Default and maximum is 300. 
+ :type max_length: int """ _validation = { 'odata_type': {'required': True}, 'name': {'required': True}, - 'min': {'maximum': 300}, - 'max': {'maximum': 300}, + 'min_length': {'maximum': 300}, + 'max_length': {'maximum': 300}, } _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'min': {'key': 'min', 'type': 'int'}, - 'max': {'key': 'max', 'type': 'int'}, + 'min_length': {'key': 'min', 'type': 'int'}, + 'max_length': {'key': 'max', 'type': 'int'}, } def __init__( @@ -2692,8 +2154,8 @@ def __init__( ): super(LengthTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' - self.min = kwargs.get('min', 0) - self.max = kwargs.get('max', 300) + self.min_length = kwargs.get('min_length', 0) + self.max_length = kwargs.get('max_length', 300) class LimitTokenFilter(TokenFilter): @@ -2745,7 +2207,7 @@ class ListDataSourcesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar data_sources: Required. The datasources in the Search service. - :vartype data_sources: list[~search_service_client.models.DataSource] + :vartype data_sources: list[~search_service_client.models.SearchIndexerDataSource] """ _validation = { @@ -2753,7 +2215,7 @@ class ListDataSourcesResult(msrest.serialization.Model): } _attribute_map = { - 'data_sources': {'key': 'value', 'type': '[DataSource]'}, + 'data_sources': {'key': 'value', 'type': '[SearchIndexerDataSource]'}, } def __init__( @@ -2772,7 +2234,7 @@ class ListIndexersResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexers: Required. The indexers in the Search service. - :vartype indexers: list[~search_service_client.models.Indexer] + :vartype indexers: list[~search_service_client.models.SearchIndexer] """ _validation = { @@ -2780,7 +2242,7 @@ class ListIndexersResult(msrest.serialization.Model): } _attribute_map = { - 'indexers': {'key': 'value', 'type': '[Indexer]'}, + 'indexers': {'key': 'value', 'type': '[SearchIndexer]'}, } def __init__( @@ -2799,7 +2261,7 @@ class ListIndexesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexes: Required. The indexes in the Search service. - :vartype indexes: list[~search_service_client.models.Index] + :vartype indexes: list[~search_service_client.models.SearchIndex] """ _validation = { @@ -2807,7 +2269,7 @@ class ListIndexesResult(msrest.serialization.Model): } _attribute_map = { - 'indexes': {'key': 'value', 'type': '[Index]'}, + 'indexes': {'key': 'value', 'type': '[SearchIndex]'}, } def __init__( @@ -2819,14 +2281,14 @@ def __init__( class ListSkillsetsResult(msrest.serialization.Model): - """Response from a list Skillset request. If successful, it includes the full definitions of all skillsets. + """Response from a list skillset request. If successful, it includes the full definitions of all skillsets. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar skillsets: Required. The skillsets defined in the Search service. 
- :vartype skillsets: list[~search_service_client.models.Skillset] + :vartype skillsets: list[~search_service_client.models.SearchIndexerSkillset] """ _validation = { @@ -2834,7 +2296,7 @@ class ListSkillsetsResult(msrest.serialization.Model): } _attribute_map = { - 'skillsets': {'key': 'value', 'type': '[Skillset]'}, + 'skillsets': {'key': 'value', 'type': '[SearchIndexerSkillset]'}, } def __init__( @@ -2872,6 +2334,123 @@ def __init__( self.synonym_maps = None +class LuceneStandardAnalyzer(LexicalAnalyzer): + """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + :param stopwords: A list of stopwords. + :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(LuceneStandardAnalyzer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' + self.max_token_length = kwargs.get('max_token_length', 255) + self.stopwords = kwargs.get('stopwords', None) + + +class LuceneStandardTokenizer(LexicalTokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(LuceneStandardTokenizer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' + self.max_token_length = kwargs.get('max_token_length', 255) + + +class LuceneStandardTokenizerV2(LexicalTokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. 
Identifies the concrete type of the tokenizer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(LuceneStandardTokenizerV2, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' + self.max_token_length = kwargs.get('max_token_length', 255) + + class MagnitudeScoringFunction(ScoringFunction): """Defines a function that boosts scores based on the magnitude of a numeric field. @@ -2990,7 +2569,7 @@ def __init__( self.mappings = kwargs.get('mappings', None) -class MergeSkill(Skill): +class MergeSkill(SearchIndexerSkill): """A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. All required parameters must be populated in order to send to Azure. @@ -3049,7 +2628,7 @@ def __init__( self.insert_post_tag = kwargs.get('insert_post_tag', " ") -class MicrosoftLanguageStemmingTokenizer(Tokenizer): +class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): """Divides text using language-specific rules and reduces words to their base forms. All required parameters must be populated in order to send to Azure. @@ -3104,7 +2683,7 @@ def __init__( self.language = kwargs.get('language', None) -class MicrosoftLanguageTokenizer(Tokenizer): +class MicrosoftLanguageTokenizer(LexicalTokenizer): """Divides text using language-specific rules. All required parameters must be populated in order to send to Azure. @@ -3243,7 +2822,7 @@ def __init__( self.max_gram = kwargs.get('max_gram', 2) -class NGramTokenizer(Tokenizer): +class NGramTokenizer(LexicalTokenizer): """Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3290,7 +2869,7 @@ def __init__( self.token_chars = kwargs.get('token_chars', None) -class OcrSkill(Skill): +class OcrSkill(SearchIndexerSkill): """A skill that extracts text from image files. All required parameters must be populated in order to send to Azure. @@ -3385,7 +2964,7 @@ def __init__( self.target_name = kwargs.get('target_name', None) -class PathHierarchyTokenizerV2(Tokenizer): +class PathHierarchyTokenizerV2(LexicalTokenizer): """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3439,7 +3018,7 @@ def __init__( self.number_of_tokens_to_skip = kwargs.get('number_of_tokens_to_skip', 0) -class PatternAnalyzer(Analyzer): +class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. 
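(Illustrative aside, not part of the generated diff: the renames above are caller-visible. A minimal sketch, assuming the models remain importable from the `_generated.models` package named in the file paths, of constructing the renamed types with their new keyword names:)

    from azure.search.documents._service._generated.models import (
        LengthTokenFilter,
        LuceneStandardAnalyzer,
    )

    # 'min'/'max' are now 'min_length'/'max_length'; per the attribute maps in
    # the hunk above they still serialize to the 'min'/'max' JSON keys, and the
    # validation maps still cap both at 300.
    short_words = LengthTokenFilter(name="my_length_filter", min_length=2, max_length=50)

    # StandardAnalyzer is now LuceneStandardAnalyzer; the @odata.type constant
    # sent on the wire is unchanged.
    standard = LuceneStandardAnalyzer(name="my_analyzer", max_token_length=255)
    assert standard.odata_type == "#Microsoft.Azure.Search.StandardAnalyzer"
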
All required parameters must be populated in order to send to Azure. @@ -3455,7 +3034,7 @@ class PatternAnalyzer(Analyzer): true. :type lower_case_terms: bool :param pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more whitespace characters. + expression that matches one or more non-word characters. :type pattern: str :param flags: Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. @@ -3616,7 +3195,7 @@ def __init__( self.replacement = kwargs.get('replacement', None) -class PatternTokenizer(Tokenizer): +class PatternTokenizer(LexicalTokenizer): """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3629,7 +3208,7 @@ class PatternTokenizer(Tokenizer): 128 characters. :type name: str :param pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more whitespace characters. + expression that matches one or more non-word characters. :type pattern: str :param flags: Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. @@ -3774,65 +3353,768 @@ class ScoringProfile(msrest.serialization.Model): """ _validation = { - 'name': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'text_weights': {'key': 'text', 'type': 'TextWeights'}, + 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, + 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ScoringProfile, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.text_weights = kwargs.get('text_weights', None) + self.functions = kwargs.get('functions', None) + self.function_aggregation = kwargs.get('function_aggregation', None) + + +class SearchError(msrest.serialization.Model): + """Describes an error condition for the Azure Cognitive Search API. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar code: One of a server-defined set of error codes. + :vartype code: str + :ivar message: Required. A human-readable representation of the error. + :vartype message: str + :ivar details: An array of details about specific errors that led to this reported error. + :vartype details: list[~search_service_client.models.SearchError] + """ + + _validation = { + 'code': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[SearchError]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchError, self).__init__(**kwargs) + self.code = None + self.message = None + self.details = None + + +class SearchField(msrest.serialization.Model): + """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
The name of the field, which must be unique within the fields collection + of the index or parent field. + :type name: str + :param type: Required. The data type of the field. Possible values include: 'Edm.String', + 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', + 'Edm.GeographyPoint', 'Edm.ComplexType'. + :type type: str or ~search_service_client.models.SearchFieldDataType + :param key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :type key: bool + :param retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :type retrievable: bool + :param searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :type searchable: bool + :param filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. + :type filterable: bool + :param sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. 
Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :type sortable: bool + :param facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :type facetable: bool + :param analyzer: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', + 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- + Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', + 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', + 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', + 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', + 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', + 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', + 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', + 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', + 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- + PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', + 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', + 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', + 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', + 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', + 'simple', 'stop', 'whitespace'. + :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName + :param search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. This + analyzer can be updated on an existing field. Must be null for complex fields. 
Possible values + include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', + 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- + Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', + 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', + 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', + 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', + 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', + 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', + 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', + 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', + 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', + 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- + cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', + 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', + 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', + 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', + 'whitespace'. + :type search_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + :param index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', + 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- + Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', + 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', + 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', + 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', + 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', + 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', + 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', + 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', + 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', + 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- + cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', + 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', + 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', + 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', + 'whitespace'. 
+ :type index_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + :param synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :type synonym_maps: list[str] + :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :type fields: list[~search_service_client.models.SearchField] + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'bool'}, + 'retrievable': {'key': 'retrievable', 'type': 'bool'}, + 'searchable': {'key': 'searchable', 'type': 'bool'}, + 'filterable': {'key': 'filterable', 'type': 'bool'}, + 'sortable': {'key': 'sortable', 'type': 'bool'}, + 'facetable': {'key': 'facetable', 'type': 'bool'}, + 'analyzer': {'key': 'analyzer', 'type': 'str'}, + 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, + 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, + 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchField, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + self.key = kwargs.get('key', None) + self.retrievable = kwargs.get('retrievable', None) + self.searchable = kwargs.get('searchable', None) + self.filterable = kwargs.get('filterable', None) + self.sortable = kwargs.get('sortable', None) + self.facetable = kwargs.get('facetable', None) + self.analyzer = kwargs.get('analyzer', None) + self.search_analyzer = kwargs.get('search_analyzer', None) + self.index_analyzer = kwargs.get('index_analyzer', None) + self.synonym_maps = kwargs.get('synonym_maps', None) + self.fields = kwargs.get('fields', None) + + +class SearchIndex(msrest.serialization.Model): + """Represents a search index definition, which describes the fields and search behavior of an index. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the index. + :type name: str + :param fields: Required. The fields of the index. + :type fields: list[~search_service_client.models.SearchField] + :param scoring_profiles: The scoring profiles for the index. + :type scoring_profiles: list[~search_service_client.models.ScoringProfile] + :param default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :type default_scoring_profile: str + :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :type cors_options: ~search_service_client.models.CorsOptions + :param suggesters: The suggesters for the index. + :type suggesters: list[~search_service_client.models.Suggester] + :param analyzers: The analyzers for the index. 
+ :type analyzers: list[~search_service_client.models.LexicalAnalyzer] + :param tokenizers: The tokenizers for the index. + :type tokenizers: list[~search_service_client.models.LexicalTokenizer] + :param token_filters: The token filters for the index. + :type token_filters: list[~search_service_client.models.TokenFilter] + :param char_filters: The character filters for the index. + :type char_filters: list[~search_service_client.models.CharFilter] + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey + :param similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined at index + creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity + algorithm is used. + :type similarity: ~search_service_client.models.Similarity + :param e_tag: The ETag of the index. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, + 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, + 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, + 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'analyzers': {'key': 'analyzers', 'type': '[LexicalAnalyzer]'}, + 'tokenizers': {'key': 'tokenizers', 'type': '[LexicalTokenizer]'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, + 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, + 'similarity': {'key': 'similarity', 'type': 'Similarity'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndex, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.fields = kwargs.get('fields', None) + self.scoring_profiles = kwargs.get('scoring_profiles', None) + self.default_scoring_profile = kwargs.get('default_scoring_profile', None) + self.cors_options = kwargs.get('cors_options', None) + self.suggesters = kwargs.get('suggesters', None) + self.analyzers = kwargs.get('analyzers', None) + self.tokenizers = kwargs.get('tokenizers', None) + self.token_filters = kwargs.get('token_filters', None) + self.char_filters = kwargs.get('char_filters', None) + self.encryption_key = kwargs.get('encryption_key', None) + self.similarity = kwargs.get('similarity', None) + self.e_tag = kwargs.get('e_tag', None) + + +class SearchIndexer(msrest.serialization.Model): + """Represents an indexer. 
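(Illustrative aside, not part of the generated diff: a hypothetical index definition using the renamed SearchIndex/SearchField models above. The index name, field names, and analyzer choice are placeholders; construction goes through **kwargs in this module, so unset properties simply default to None:)

    from azure.search.documents._service._generated.models import SearchField, SearchIndex

    index = SearchIndex(
        name="hotels",
        fields=[
            SearchField(name="hotelId", type="Edm.String", key=True),
            SearchField(name="description", type="Edm.String",
                        searchable=True, analyzer="en.lucene"),
        ],
    )
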
+ + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the indexer. + :type name: str + :param description: The description of the indexer. + :type description: str + :param data_source_name: Required. The name of the datasource from which this indexer reads + data. + :type data_source_name: str + :param skillset_name: The name of the skillset executing with this indexer. + :type skillset_name: str + :param target_index_name: Required. The name of the index to which this indexer writes data. + :type target_index_name: str + :param schedule: The schedule for this indexer. + :type schedule: ~search_service_client.models.IndexingSchedule + :param parameters: Parameters for indexer execution. + :type parameters: ~search_service_client.models.IndexingParameters + :param field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :type field_mappings: list[~search_service_client.models.FieldMapping] + :param output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :type output_field_mappings: list[~search_service_client.models.FieldMapping] + :param is_disabled: A value indicating whether the indexer is disabled. Default is false. + :type is_disabled: bool + :param e_tag: The ETag of the indexer. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'data_source_name': {'required': True}, + 'target_index_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, + 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, + 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, + 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, + 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, + 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, + 'is_disabled': {'key': 'disabled', 'type': 'bool'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexer, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.description = kwargs.get('description', None) + self.data_source_name = kwargs.get('data_source_name', None) + self.skillset_name = kwargs.get('skillset_name', None) + self.target_index_name = kwargs.get('target_index_name', None) + self.schedule = kwargs.get('schedule', None) + self.parameters = kwargs.get('parameters', None) + self.field_mappings = kwargs.get('field_mappings', None) + self.output_field_mappings = kwargs.get('output_field_mappings', None) + self.is_disabled = kwargs.get('is_disabled', False) + self.e_tag = kwargs.get('e_tag', None) + + +class SearchIndexerDataContainer(msrest.serialization.Model): + """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. + :type name: str + :param query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. 
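(Illustrative aside, not part of the generated diff: a hypothetical sketch wiring the renamed SearchIndexer model defined above to a datasource and target index; all resource names are placeholders:)

    from azure.search.documents._service._generated.models import SearchIndexer

    indexer = SearchIndexer(
        name="hotels-indexer",                  # placeholder names throughout
        data_source_name="hotels-datasource",
        target_index_name="hotels",
        is_disabled=False,                      # serialized as 'disabled', per the map above
    )
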
+ :type query: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'query': {'key': 'query', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerDataContainer, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.query = kwargs.get('query', None) + + +class SearchIndexerDataSource(msrest.serialization.Model): + """Represents a datasource definition, which can be used to configure an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the datasource. + :type name: str + :param description: The description of the datasource. + :type description: str + :param type: Required. The type of the datasource. Possible values include: 'azuresql', + 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. + :type type: str or ~search_service_client.models.SearchIndexerDataSourceType + :param credentials: Required. Credentials for the datasource. + :type credentials: ~search_service_client.models.DataSourceCredentials + :param container: Required. The data container for the datasource. + :type container: ~search_service_client.models.SearchIndexerDataContainer + :param data_change_detection_policy: The data change detection policy for the datasource. + :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy + :param data_deletion_detection_policy: The data deletion detection policy for the datasource. + :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy + :param e_tag: The ETag of the data source. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + 'credentials': {'required': True}, + 'container': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, + 'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'}, + 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, + 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerDataSource, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.description = kwargs.get('description', None) + self.type = kwargs.get('type', None) + self.credentials = kwargs.get('credentials', None) + self.container = kwargs.get('container', None) + self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None) + self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None) + self.e_tag = kwargs.get('e_tag', None) + + +class SearchIndexerError(msrest.serialization.Model): + """Represents an item- or document-level indexing error. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar key: The key of the item for which indexing failed. + :vartype key: str + :ivar error_message: Required. The message describing the error that occurred while processing + the item. + :vartype error_message: str + :ivar status_code: Required. 
The status code indicating why the indexing operation failed. + Possible values include: 400 for a malformed input document, 404 for document not found, 409 + for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the + service is too busy. + :vartype status_code: int + :ivar name: The name of the source at which the error originated. For example, this could refer + to a particular skill in the attached skillset. This may not be always available. + :vartype name: str + :ivar details: Additional, verbose details about the error to assist in debugging the indexer. + This may not be always available. + :vartype details: str + :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This + may not be always available. + :vartype documentation_link: str + """ + + _validation = { + 'key': {'readonly': True}, + 'error_message': {'required': True, 'readonly': True}, + 'status_code': {'required': True, 'readonly': True}, + 'name': {'readonly': True}, + 'details': {'readonly': True}, + 'documentation_link': {'readonly': True}, + } + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + 'status_code': {'key': 'statusCode', 'type': 'int'}, + 'name': {'key': 'name', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerError, self).__init__(**kwargs) + self.key = None + self.error_message = None + self.status_code = None + self.name = None + self.details = None + self.documentation_link = None + + +class SearchIndexerLimits(msrest.serialization.Model): + """SearchIndexerLimits. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar max_run_time: The maximum duration that the indexer is permitted to run for one + execution. + :vartype max_run_time: ~datetime.timedelta + :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be + considered valid for indexing. + :vartype max_document_extraction_size: long + :ivar max_document_content_characters_to_extract: The maximum number of characters that will be + extracted from a document picked up for indexing. + :vartype max_document_content_characters_to_extract: long + """ + + _validation = { + 'max_run_time': {'readonly': True}, + 'max_document_extraction_size': {'readonly': True}, + 'max_document_content_characters_to_extract': {'readonly': True}, + } + + _attribute_map = { + 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, + 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, + 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerLimits, self).__init__(**kwargs) + self.max_run_time = None + self.max_document_extraction_size = None + self.max_document_content_characters_to_extract = None + + +class SearchIndexerSkillset(msrest.serialization.Model): + """A list of skills. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the skillset. + :type name: str + :param description: Required. The description of the skillset. + :type description: str + :param skills: Required. A list of skills in the skillset. 
+ :type skills: list[~search_service_client.models.SearchIndexerSkill] + :param cognitive_services_account: Details about cognitive services to be used when running + skills. + :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount + :param e_tag: The ETag of the skillset. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'description': {'required': True}, + 'skills': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'skills': {'key': 'skills', 'type': '[SearchIndexerSkill]'}, + 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerSkillset, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.description = kwargs.get('description', None) + self.skills = kwargs.get('skills', None) + self.cognitive_services_account = kwargs.get('cognitive_services_account', None) + self.e_tag = kwargs.get('e_tag', None) + + +class SearchIndexerStatus(msrest.serialization.Model): + """Represents the current status and execution history of an indexer. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', + 'running'. + :vartype status: str or ~search_service_client.models.IndexerStatus + :ivar last_result: The result of the most recent or an in-progress indexer execution. + :vartype last_result: ~search_service_client.models.IndexerExecutionResult + :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse + chronological order. + :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] + :ivar limits: Required. The execution limits for the indexer. + :vartype limits: ~search_service_client.models.SearchIndexerLimits + """ + + _validation = { + 'status': {'required': True, 'readonly': True}, + 'last_result': {'readonly': True}, + 'execution_history': {'required': True, 'readonly': True}, + 'limits': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, + 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, + 'limits': {'key': 'limits', 'type': 'SearchIndexerLimits'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerStatus, self).__init__(**kwargs) + self.status = None + self.last_result = None + self.execution_history = None + self.limits = None + + +class SearchIndexerWarning(msrest.serialization.Model): + """Represents an item-level warning. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar key: The key of the item which generated a warning. + :vartype key: str + :ivar message: Required. The message describing the warning that occurred while processing the + item. + :vartype message: str + :ivar name: The name of the source at which the warning originated. For example, this could + refer to a particular skill in the attached skillset. This may not be always available. 
+ :vartype name: str + :ivar details: Additional, verbose details about the warning to assist in debugging the + indexer. This may not be always available. + :vartype details: str + :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This + may not be always available. + :vartype documentation_link: str + """ + + _validation = { + 'key': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'name': {'readonly': True}, + 'details': {'readonly': True}, + 'documentation_link': {'readonly': True}, } _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'text_weights': {'key': 'text', 'type': 'TextWeights'}, - 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, - 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, } def __init__( self, **kwargs ): - super(ScoringProfile, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.text_weights = kwargs.get('text_weights', None) - self.functions = kwargs.get('functions', None) - self.function_aggregation = kwargs.get('function_aggregation', None) - + super(SearchIndexerWarning, self).__init__(**kwargs) + self.key = None + self.message = None + self.name = None + self.details = None + self.documentation_link = None -class SearchError(msrest.serialization.Model): - """Describes an error condition for the Azure Cognitive Search API. - Variables are only populated by the server, and will be ignored when sending a request. +class SearchResourceEncryptionKey(msrest.serialization.Model): + """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. All required parameters must be populated in order to send to Azure. - :ivar code: One of a server-defined set of error codes. - :vartype code: str - :ivar message: Required. A human-readable representation of the error. - :vartype message: str - :ivar details: An array of details about specific errors that led to this reported error. - :vartype details: list[~search_service_client.models.SearchError] + :param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data + at rest. + :type key_name: str + :param key_version: Required. The version of your Azure Key Vault key to be used to encrypt + your data at rest. + :type key_version: str + :param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that + contains the key to be used to encrypt your data at rest. An example URI might be https://my- + keyvault-name.vault.azure.net. + :type vault_uri: str + :param access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key Vault. Not required if using managed identity instead. 
+ :type access_credentials: + ~search_service_client.models.AzureActiveDirectoryApplicationCredentials """ _validation = { - 'code': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'details': {'readonly': True}, + 'key_name': {'required': True}, + 'key_version': {'required': True}, + 'vault_uri': {'required': True}, } _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[SearchError]'}, + 'key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, + 'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, + 'vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, + 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, } def __init__( self, **kwargs ): - super(SearchError, self).__init__(**kwargs) - self.code = None - self.message = None - self.details = None + super(SearchResourceEncryptionKey, self).__init__(**kwargs) + self.key_name = kwargs.get('key_name', None) + self.key_version = kwargs.get('key_version', None) + self.vault_uri = kwargs.get('vault_uri', None) + self.access_credentials = kwargs.get('access_credentials', None) -class SentimentSkill(Skill): +class SentimentSkill(SearchIndexerSkill): """Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. All required parameters must be populated in order to send to Azure. @@ -4006,7 +4288,7 @@ def __init__( self.limits = kwargs.get('limits', None) -class ShaperSkill(Skill): +class ShaperSkill(SearchIndexerSkill): """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). All required parameters must be populated in order to send to Azure. @@ -4119,50 +4401,6 @@ def __init__( self.filter_token = kwargs.get('filter_token', "_") -class Skillset(msrest.serialization.Model): - """A list of skills. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the skillset. - :type name: str - :param description: Required. The description of the skillset. - :type description: str - :param skills: Required. A list of skills in the skillset. - :type skills: list[~search_service_client.models.Skill] - :param cognitive_services_account: Details about cognitive services to be used when running - skills. - :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount - :param e_tag: The ETag of the skillset. - :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'description': {'required': True}, - 'skills': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'skills': {'key': 'skills', 'type': '[Skill]'}, - 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Skillset, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.description = kwargs.get('description', None) - self.skills = kwargs.get('skills', None) - self.cognitive_services_account = kwargs.get('cognitive_services_account', None) - self.e_tag = kwargs.get('e_tag', None) - - class SnowballTokenFilter(TokenFilter): """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. 
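(Illustrative aside, not part of the generated diff: EncryptionKey is now SearchResourceEncryptionKey, and SynonymMap's encryption_key property references the new type, as the SynonymMap hunk below shows. A hypothetical construction sketch with placeholder vault values:)

    from azure.search.documents._service._generated.models import SearchResourceEncryptionKey

    # The Python attribute names differ from the wire format: key_name,
    # key_version, and vault_uri serialize to keyVaultKeyName,
    # keyVaultKeyVersion, and keyVaultUri per the attribute map above.
    key = SearchResourceEncryptionKey(
        key_name="my-key",
        key_version="0123456789abcdef",
        vault_uri="https://my-keyvault-name.vault.azure.net",
    )
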
@@ -4237,7 +4475,7 @@ def __init__( self.soft_delete_marker_value = kwargs.get('soft_delete_marker_value', None) -class SplitSkill(Skill): +class SplitSkill(SearchIndexerSkill): """A skill to split a string into chunks of text. All required parameters must be populated in order to send to Azure. @@ -4326,123 +4564,6 @@ def __init__( self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' -class StandardAnalyzer(Analyzer): - """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :type max_token_length: int - :param stopwords: A list of stopwords. - :type stopwords: list[str] - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - 'stopwords': {'key': 'stopwords', 'type': '[str]'}, - } - - def __init__( - self, - **kwargs - ): - super(StandardAnalyzer, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' - self.max_token_length = kwargs.get('max_token_length', 255) - self.stopwords = kwargs.get('stopwords', None) - - -class StandardTokenizer(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. - :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(StandardTokenizer, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' - self.max_token_length = kwargs.get('max_token_length', 255) - - -class StandardTokenizerV2(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. 
The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(StandardTokenizerV2, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' - self.max_token_length = kwargs.get('max_token_length', 255) - - class StemmerOverrideTokenFilter(TokenFilter): """Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. @@ -4526,7 +4647,7 @@ def __init__( self.language = kwargs.get('language', None) -class StopAnalyzer(Analyzer): +class StopAnalyzer(LexicalAnalyzer): """Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -4681,7 +4802,7 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey + :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey :param e_tag: The ETag of the synonym map. :type e_tag: str """ @@ -4696,7 +4817,7 @@ class SynonymMap(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, 'format': {'key': 'format', 'type': 'str'}, 'synonyms': {'key': 'synonyms', 'type': 'str'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, } @@ -4840,7 +4961,7 @@ def __init__( self.tags_parameter = kwargs.get('tags_parameter', None) -class TextTranslationSkill(Skill): +class TextTranslationSkill(SearchIndexerSkill): """A skill to translate text from one language to another. All required parameters must be populated in order to send to Azure. @@ -4949,51 +5070,6 @@ def __init__( self.weights = kwargs.get('weights', None) -class TokenInfo(msrest.serialization.Model): - """Information about a token returned by an analyzer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar token: Required. The token returned by the analyzer. - :vartype token: str - :ivar start_offset: Required. The index of the first character of the token in the input text. - :vartype start_offset: int - :ivar end_offset: Required. 
The index of the last character of the token in the input text. - :vartype end_offset: int - :ivar position: Required. The position of the token in the input text relative to other tokens. - The first token in the input text has position 0, the next has position 1, and so on. Depending - on the analyzer used, some tokens might have the same position, for example if they are - synonyms of each other. - :vartype position: int - """ - - _validation = { - 'token': {'required': True, 'readonly': True}, - 'start_offset': {'required': True, 'readonly': True}, - 'end_offset': {'required': True, 'readonly': True}, - 'position': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'token': {'key': 'token', 'type': 'str'}, - 'start_offset': {'key': 'startOffset', 'type': 'int'}, - 'end_offset': {'key': 'endOffset', 'type': 'int'}, - 'position': {'key': 'position', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(TokenInfo, self).__init__(**kwargs) - self.token = None - self.start_offset = None - self.end_offset = None - self.position = None - - class TruncateTokenFilter(TokenFilter): """Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. @@ -5031,7 +5107,7 @@ def __init__( self.length = kwargs.get('length', 300) -class UaxUrlEmailTokenizer(Tokenizer): +class UaxUrlEmailTokenizer(LexicalTokenizer): """Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -5106,7 +5182,7 @@ def __init__( self.only_on_same_position = kwargs.get('only_on_same_position', False) -class WebApiSkill(Skill): +class WebApiSkill(SearchIndexerSkill): """A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. All required parameters must be populated in order to send to Azure. diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py index 40d12418b74e..f21704937959 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py @@ -11,74 +11,49 @@ import msrest.serialization -class AccessCondition(msrest.serialization.Model): - """Parameter group. - - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_match': {'key': 'If-Match', 'type': 'str'}, - 'if_none_match': {'key': 'If-None-Match', 'type': 'str'}, - } - - def __init__( - self, - *, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs - ): - super(AccessCondition, self).__init__(**kwargs) - self.if_match = if_match - self.if_none_match = if_none_match - - -class Analyzer(msrest.serialization.Model): - """Base type for analyzers. +class AnalyzedTokenInfo(msrest.serialization.Model): + """Information about a token returned by an analyzer. - You probably want to use the sub-classes and not this class directly. 
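(Illustrative aside, not part of the generated diff: the _models_py3.py variants below mirror _models.py but expose explicit, typed keyword-only constructors instead of **kwargs, so renames such as TokenInfo becoming AnalyzedTokenInfo surface directly in signatures. A hypothetical call sketch against the AnalyzeRequest model shown below:)

    from azure.search.documents._service._generated.models import AnalyzeRequest

    # Keyword-only arguments in the py3 models; analyzer/tokenizer take the
    # same enum-backed strings as before. The resulting AnalyzeResult carries
    # a list of AnalyzedTokenInfo.
    request = AnalyzeRequest(text="The quick brown fox", analyzer="standard.lucene")
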
Known - sub-classes are: CustomAnalyzer, PatternAnalyzer, StandardAnalyzer, StopAnalyzer. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str + :ivar token: Required. The token returned by the analyzer. + :vartype token: str + :ivar start_offset: Required. The index of the first character of the token in the input text. + :vartype start_offset: int + :ivar end_offset: Required. The index of the last character of the token in the input text. + :vartype end_offset: int + :ivar position: Required. The position of the token in the input text relative to other tokens. + The first token in the input text has position 0, the next has position 1, and so on. Depending + on the analyzer used, some tokens might have the same position, for example if they are + synonyms of each other. + :vartype position: int """ _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, + 'token': {'required': True, 'readonly': True}, + 'start_offset': {'required': True, 'readonly': True}, + 'end_offset': {'required': True, 'readonly': True}, + 'position': {'required': True, 'readonly': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'StandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + 'token': {'key': 'token', 'type': 'str'}, + 'start_offset': {'key': 'startOffset', 'type': 'int'}, + 'end_offset': {'key': 'endOffset', 'type': 'int'}, + 'position': {'key': 'position', 'type': 'int'}, } def __init__( self, - *, - name: str, **kwargs ): - super(Analyzer, self).__init__(**kwargs) - self.odata_type = None - self.name = name + super(AnalyzedTokenInfo, self).__init__(**kwargs) + self.token = None + self.start_offset = None + self.end_offset = None + self.position = None class AnalyzeRequest(msrest.serialization.Model): @@ -107,13 +82,13 @@ class AnalyzeRequest(msrest.serialization.Model): 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName + :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName :param tokenizer: The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. Possible values include: 'classic', 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. 
- :type tokenizer: str or ~search_service_client.models.TokenizerName + :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName :param token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. :type token_filters: list[str or ~search_service_client.models.TokenFilterName] @@ -138,8 +113,8 @@ def __init__( self, *, text: str, - analyzer: Optional[Union[str, "AnalyzerName"]] = None, - tokenizer: Optional[Union[str, "TokenizerName"]] = None, + analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + tokenizer: Optional[Union[str, "LexicalTokenizerName"]] = None, token_filters: Optional[List[Union[str, "TokenFilterName"]]] = None, char_filters: Optional[List[str]] = None, **kwargs @@ -158,7 +133,7 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param tokens: Required. The list of tokens returned by the analyzer specified in the request. - :type tokens: list[~search_service_client.models.TokenInfo] + :type tokens: list[~search_service_client.models.AnalyzedTokenInfo] """ _validation = { @@ -166,13 +141,13 @@ class AnalyzeResult(msrest.serialization.Model): } _attribute_map = { - 'tokens': {'key': 'tokens', 'type': '[TokenInfo]'}, + 'tokens': {'key': 'tokens', 'type': '[AnalyzedTokenInfo]'}, } def __init__( self, *, - tokens: List["TokenInfo"], + tokens: List["AnalyzedTokenInfo"], **kwargs ): super(AnalyzeResult, self).__init__(**kwargs) @@ -295,6 +270,78 @@ def __init__( self.application_secret = application_secret +class Similarity(msrest.serialization.Model): + """Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: BM25Similarity, ClassicSimilarity. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.BM25Similarity': 'BM25Similarity', '#Microsoft.Azure.Search.ClassicSimilarity': 'ClassicSimilarity'} + } + + def __init__( + self, + **kwargs + ): + super(Similarity, self).__init__(**kwargs) + self.odata_type = None + + +class BM25Similarity(Similarity): + """Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param k1: This property controls the scaling function between the term frequency of each + matching terms and the final relevance score of a document-query pair. By default, a value of + 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. + :type k1: float + :param b: This property controls how the length of a document affects the relevance score. By + default, a value of 0.75 is used. 
A value of 0.0 means no length normalization is applied, + while a value of 1.0 means the score is fully normalized by the length of the document. + :type b: float + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'k1': {'key': 'k1', 'type': 'float'}, + 'b': {'key': 'b', 'type': 'float'}, + } + + def __init__( + self, + *, + k1: Optional[float] = None, + b: Optional[float] = None, + **kwargs + ): + super(BM25Similarity, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' + self.k1 = k1 + self.b = b + + class CharFilter(msrest.serialization.Model): """Base type for character filters. @@ -338,7 +385,7 @@ def __init__( class CjkBigramTokenFilter(TokenFilter): - """Forms bigrams of CJK terms that are generated from StandardTokenizer. This token filter is implemented using Apache Lucene. + """Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -382,11 +429,36 @@ def __init__( self.output_unigrams = output_unigrams -class Tokenizer(msrest.serialization.Model): +class ClassicSimilarity(Similarity): + """Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ClassicSimilarity, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' + + +class LexicalTokenizer(msrest.serialization.Model): """Base type for tokenizers. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, StandardTokenizer, StandardTokenizerV2, UaxUrlEmailTokenizer. + sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, UaxUrlEmailTokenizer. All required parameters must be populated in order to send to Azure. 
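
[editor's note] The hunks above introduce a polymorphic Similarity hierarchy (BM25Similarity, ClassicSimilarity) whose concrete type is carried in the '@odata.type' discriminator. A minimal sketch of how these generated msrest models are constructed and serialized, assuming the models package from this diff is importable; the k1/b values are illustrative arguments, not client-enforced defaults:

    from azure.search.documents._service._generated.models import (
        BM25Similarity,
        ClassicSimilarity,
    )

    # Each subclass pins the discriminator in its __init__; callers never pass it.
    bm25 = BM25Similarity(k1=1.2, b=0.75)
    assert bm25.odata_type == '#Microsoft.Azure.Search.BM25Similarity'

    # serialize() is inherited from msrest.serialization.Model; the escaped
    # '@odata\\.type' attribute-map key is emitted as a literal '@odata.type'.
    payload = bm25.serialize()
    assert payload['@odata.type'] == '#Microsoft.Azure.Search.BM25Similarity'

    # ClassicSimilarity takes no tuning parameters.
    classic = ClassicSimilarity()
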
@@ -410,7 +482,7 @@ class Tokenizer(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'StandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'StandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} + 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'LuceneStandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'LuceneStandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} } def __init__( @@ -419,12 +491,12 @@ def __init__( name: str, **kwargs ): - super(Tokenizer, self).__init__(**kwargs) + super(LexicalTokenizer, self).__init__(**kwargs) self.odata_type = None self.name = name -class ClassicTokenizer(Tokenizer): +class ClassicTokenizer(LexicalTokenizer): """Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -595,7 +667,7 @@ def __init__( self.use_query_mode = use_query_mode -class Skill(msrest.serialization.Model): +class SearchIndexerSkill(msrest.serialization.Model): """Base type for skills. You probably want to use the sub-classes and not this class directly. Known @@ -653,7 +725,7 @@ def __init__( context: Optional[str] = None, **kwargs ): - super(Skill, self).__init__(**kwargs) + super(SearchIndexerSkill, self).__init__(**kwargs) self.odata_type = None self.name = name self.description = description @@ -662,7 +734,7 @@ def __init__( self.outputs = outputs -class ConditionalSkill(Skill): +class ConditionalSkill(SearchIndexerSkill): """A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. All required parameters must be populated in order to send to Azure. @@ -752,7 +824,49 @@ def __init__( self.max_age_in_seconds = max_age_in_seconds -class CustomAnalyzer(Analyzer): +class LexicalAnalyzer(msrest.serialization.Model): + """Base type for analyzers. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CustomAnalyzer, PatternAnalyzer, LuceneStandardAnalyzer, StopAnalyzer. 
+ + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'LuceneStandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(LexicalAnalyzer, self).__init__(**kwargs) + self.odata_type = None + self.name = name + + +class CustomAnalyzer(LexicalAnalyzer): """Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. All required parameters must be populated in order to send to Azure. @@ -769,7 +883,7 @@ class CustomAnalyzer(Analyzer): 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.TokenizerName + :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName :param token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. @@ -798,7 +912,7 @@ def __init__( self, *, name: str, - tokenizer: Union[str, "TokenizerName"], + tokenizer: Union[str, "LexicalTokenizerName"], token_filters: Optional[List[Union[str, "TokenFilterName"]]] = None, char_filters: Optional[List[str]] = None, **kwargs @@ -843,40 +957,6 @@ def __init__( self.odata_type = None -class DataContainer(msrest.serialization.Model): - """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the table or view (for Azure SQL data source) or collection - (for CosmosDB data source) that will be indexed. - :type name: str - :param query: A query that is applied to this data container. The syntax and meaning of this - parameter is datasource-specific. Not supported by Azure SQL datasources. 
- :type query: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'query': {'key': 'query', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - query: Optional[str] = None, - **kwargs - ): - super(DataContainer, self).__init__(**kwargs) - self.name = name - self.query = query - - class DataDeletionDetectionPolicy(msrest.serialization.Model): """Base type for data deletion detection policies. @@ -910,72 +990,6 @@ def __init__( self.odata_type = None -class DataSource(msrest.serialization.Model): - """Represents a datasource definition, which can be used to configure an indexer. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the datasource. - :type name: str - :param description: The description of the datasource. - :type description: str - :param type: Required. The type of the datasource. Possible values include: 'azuresql', - 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. - :type type: str or ~search_service_client.models.DataSourceType - :param credentials: Required. Credentials for the datasource. - :type credentials: ~search_service_client.models.DataSourceCredentials - :param container: Required. The data container for the datasource. - :type container: ~search_service_client.models.DataContainer - :param data_change_detection_policy: The data change detection policy for the datasource. - :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy - :param data_deletion_detection_policy: The data deletion detection policy for the datasource. - :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy - :param e_tag: The ETag of the DataSource. - :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - 'credentials': {'required': True}, - 'container': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, - 'container': {'key': 'container', 'type': 'DataContainer'}, - 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, - 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "DataSourceType"], - credentials: "DataSourceCredentials", - container: "DataContainer", - description: Optional[str] = None, - data_change_detection_policy: Optional["DataChangeDetectionPolicy"] = None, - data_deletion_detection_policy: Optional["DataDeletionDetectionPolicy"] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(DataSource, self).__init__(**kwargs) - self.name = name - self.description = description - self.type = type - self.credentials = credentials - self.container = container - self.data_change_detection_policy = data_change_detection_policy - self.data_deletion_detection_policy = data_deletion_detection_policy - self.e_tag = e_tag - - class DataSourceCredentials(msrest.serialization.Model): """Represents credentials that can be used to connect to a datasource. 
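
[editor's note] The preceding hunks rename Analyzer to LexicalAnalyzer and retype the analyzer/tokenizer name parameters as LexicalAnalyzerName/LexicalTokenizerName. A minimal sketch of building the renamed models, assuming the generated models package from this diff; the analyzer name and sample text are hypothetical, while 'standard_v2' and 'lowercase' are taken from the enum value lists quoted in the docstrings above:

    from azure.search.documents._service._generated.models import (
        AnalyzeRequest,
        CustomAnalyzer,
    )

    # tokenizer is typed Union[str, "LexicalTokenizerName"], so the plain enum
    # string form is accepted.
    analyzer = CustomAnalyzer(
        name='my-custom-analyzer',      # hypothetical name
        tokenizer='standard_v2',
        token_filters=['lowercase'],
    )
    assert analyzer.odata_type == '#Microsoft.Azure.Search.CustomAnalyzer'

    # AnalyzeRequest's analyzer/tokenizer hints use the same renamed enums.
    request = AnalyzeRequest(text='The quick brown fox', tokenizer='standard_v2')
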
@@ -1338,7 +1352,7 @@ def __init__( self.side = side -class EdgeNGramTokenizer(Tokenizer): +class EdgeNGramTokenizer(LexicalTokenizer): """Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -1429,58 +1443,8 @@ def __init__( self.articles = articles -class EncryptionKey(msrest.serialization.Model): - """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. - - All required parameters must be populated in order to send to Azure. - - :param key_vault_key_name: Required. The name of your Azure Key Vault key to be used to encrypt - your data at rest. - :type key_vault_key_name: str - :param key_vault_key_version: Required. The version of your Azure Key Vault key to be used to - encrypt your data at rest. - :type key_vault_key_version: str - :param key_vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, - that contains the key to be used to encrypt your data at rest. An example URI might be - https://my-keyvault-name.vault.azure.net. - :type key_vault_uri: str - :param access_credentials: Optional Azure Active Directory credentials used for accessing your - Azure Key Vault. Not required if using managed identity instead. - :type access_credentials: - ~search_service_client.models.AzureActiveDirectoryApplicationCredentials - """ - - _validation = { - 'key_vault_key_name': {'required': True}, - 'key_vault_key_version': {'required': True}, - 'key_vault_uri': {'required': True}, - } - - _attribute_map = { - 'key_vault_key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, - 'key_vault_key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, - 'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, - 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, - } - - def __init__( - self, - *, - key_vault_key_name: str, - key_vault_key_version: str, - key_vault_uri: str, - access_credentials: Optional["AzureActiveDirectoryApplicationCredentials"] = None, - **kwargs - ): - super(EncryptionKey, self).__init__(**kwargs) - self.key_vault_key_name = key_vault_key_name - self.key_vault_key_version = key_vault_key_version - self.key_vault_uri = key_vault_uri - self.access_credentials = access_credentials - - -class EntityRecognitionSkill(Skill): - """Text analytics entity recognition. +class EntityRecognitionSkill(SearchIndexerSkill): + """Text analytics entity recognition. All required parameters must be populated in order to send to Azure. @@ -1562,198 +1526,6 @@ def __init__( self.minimum_precision = minimum_precision -class Field(msrest.serialization.Model): - """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the field, which must be unique within the fields collection - of the index or parent field. - :type name: str - :param type: Required. The data type of the field. Possible values include: 'Edm.String', - 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', - 'Edm.GeographyPoint', 'Edm.ComplexType'. 
- :type type: str or ~search_service_client.models.DataType - :param key: A value indicating whether the field uniquely identifies documents in the index. - Exactly one top-level field in each index must be chosen as the key field and it must be of - type Edm.String. Key fields can be used to look up documents directly and update or delete - specific documents. Default is false for simple fields and null for complex fields. - :type key: bool - :param retrievable: A value indicating whether the field can be returned in a search result. - You can disable this option if you want to use a field (for example, margin) as a filter, - sorting, or scoring mechanism but do not want the field to be visible to the end user. This - property must be true for key fields, and it must be null for complex fields. This property can - be changed on existing fields. Enabling this property does not cause any increase in index - storage requirements. Default is true for simple fields and null for complex fields. - :type retrievable: bool - :param searchable: A value indicating whether the field is full-text searchable. This means it - will undergo analysis such as word-breaking during indexing. If you set a searchable field to a - value like "sunny day", internally it will be split into the individual tokens "sunny" and - "day". This enables full-text searches for these terms. Fields of type Edm.String or - Collection(Edm.String) are searchable by default. This property must be false for simple fields - of other non-string data types, and it must be null for complex fields. Note: searchable fields - consume extra space in your index since Azure Cognitive Search will store an additional - tokenized version of the field value for full-text searches. If you want to save space in your - index and you don't need a field to be included in searches, set searchable to false. - :type searchable: bool - :param filterable: A value indicating whether to enable the field to be referenced in $filter - queries. filterable differs from searchable in how strings are handled. Fields of type - Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so - comparisons are for exact matches only. For example, if you set such a field f to "sunny day", - $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property - must be null for complex fields. Default is true for simple fields and null for complex fields. - :type filterable: bool - :param sortable: A value indicating whether to enable the field to be referenced in $orderby - expressions. By default Azure Cognitive Search sorts results by score, but in many experiences - users will want to sort by fields in the documents. A simple field can be sortable only if it - is single-valued (it has a single value in the scope of the parent document). Simple collection - fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex - collections are also multi-valued, and therefore cannot be sortable. This is true whether it's - an immediate parent field, or an ancestor field, that's the complex collection. Complex fields - cannot be sortable and the sortable property must be null for such fields. The default for - sortable is true for single-valued simple fields, false for multi-valued simple fields, and - null for complex fields. - :type sortable: bool - :param facetable: A value indicating whether to enable the field to be referenced in facet - queries. 
Typically used in a presentation of search results that includes hit count by category - (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so - on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or - Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - fields. - :type facetable: bool - :param analyzer: The name of the analyzer to use for the field. This option can be used only - with searchable fields and it can't be set together with either searchAnalyzer or - indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName - :param search_analyzer: The name of the analyzer used at search time for the field. This option - can be used only with searchable fields. It must be set together with indexAnalyzer and it - cannot be set together with the analyzer option. This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. This - analyzer can be updated on an existing field. Must be null for complex fields. 
Possible values - include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', - 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- - Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', - 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', - 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', - 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', - 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', - 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', - 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', - 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', - 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', - 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. - :type search_analyzer: str or ~search_service_client.models.AnalyzerName - :param index_analyzer: The name of the analyzer used at indexing time for the field. This - option can be used only with searchable fields. It must be set together with searchAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. Once - the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. - Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', - 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- - Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', - 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', - 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', - 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', - 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', - 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', - 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', - 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', - 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', - 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. 
- :type index_analyzer: str or ~search_service_client.models.AnalyzerName - :param synonym_maps: A list of the names of synonym maps to associate with this field. This - option can be used only with searchable fields. Currently only one synonym map per field is - supported. Assigning a synonym map to a field ensures that query terms targeting that field are - expanded at query-time using the rules in the synonym map. This attribute can be changed on - existing fields. Must be null or an empty collection for complex fields. - :type synonym_maps: list[str] - :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or - Collection(Edm.ComplexType). Must be null or empty for simple fields. - :type fields: list[~search_service_client.models.Field] - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'key': {'key': 'key', 'type': 'bool'}, - 'retrievable': {'key': 'retrievable', 'type': 'bool'}, - 'searchable': {'key': 'searchable', 'type': 'bool'}, - 'filterable': {'key': 'filterable', 'type': 'bool'}, - 'sortable': {'key': 'sortable', 'type': 'bool'}, - 'facetable': {'key': 'facetable', 'type': 'bool'}, - 'analyzer': {'key': 'analyzer', 'type': 'str'}, - 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, - 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, - 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "DataType"], - key: Optional[bool] = None, - retrievable: Optional[bool] = None, - searchable: Optional[bool] = None, - filterable: Optional[bool] = None, - sortable: Optional[bool] = None, - facetable: Optional[bool] = None, - analyzer: Optional[Union[str, "AnalyzerName"]] = None, - search_analyzer: Optional[Union[str, "AnalyzerName"]] = None, - index_analyzer: Optional[Union[str, "AnalyzerName"]] = None, - synonym_maps: Optional[List[str]] = None, - fields: Optional[List["Field"]] = None, - **kwargs - ): - super(Field, self).__init__(**kwargs) - self.name = name - self.type = type - self.key = key - self.retrievable = retrievable - self.searchable = searchable - self.filterable = filterable - self.sortable = sortable - self.facetable = facetable - self.analyzer = analyzer - self.search_analyzer = search_analyzer - self.index_analyzer = index_analyzer - self.synonym_maps = synonym_maps - self.fields = fields - - class FieldMapping(msrest.serialization.Model): """Defines a mapping between a field in a data source and a target field in an index. @@ -1968,7 +1740,7 @@ def __init__( self.high_water_mark_column_name = high_water_mark_column_name -class ImageAnalysisSkill(Skill): +class ImageAnalysisSkill(SearchIndexerSkill): """A skill that analyzes image files. It extracts a rich set of visual features based on the image content. All required parameters must be populated in order to send to Azure. @@ -2039,230 +1811,12 @@ def __init__( self.details = details -class Index(msrest.serialization.Model): - """Represents a search index definition, which describes the fields and search behavior of an index. - - All required parameters must be populated in order to send to Azure. +class IndexerExecutionResult(msrest.serialization.Model): + """Represents the result of an individual indexer execution. - :param name: Required. The name of the index. - :type name: str - :param fields: Required. 
The fields of the index. - :type fields: list[~search_service_client.models.Field] - :param scoring_profiles: The scoring profiles for the index. - :type scoring_profiles: list[~search_service_client.models.ScoringProfile] - :param default_scoring_profile: The name of the scoring profile to use if none is specified in - the query. If this property is not set and no scoring profile is specified in the query, then - default scoring (tf-idf) will be used. - :type default_scoring_profile: str - :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :type cors_options: ~search_service_client.models.CorsOptions - :param suggesters: The suggesters for the index. - :type suggesters: list[~search_service_client.models.Suggester] - :param analyzers: The analyzers for the index. - :type analyzers: list[~search_service_client.models.Analyzer] - :param tokenizers: The tokenizers for the index. - :type tokenizers: list[~search_service_client.models.Tokenizer] - :param token_filters: The token filters for the index. - :type token_filters: list[~search_service_client.models.TokenFilter] - :param char_filters: The character filters for the index. - :type char_filters: list[~search_service_client.models.CharFilter] - :param encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive - Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive - Search will ignore attempts to set this property to null. You can change this property as - needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey - :param e_tag: The ETag of the index. - :type e_tag: str - """ + Variables are only populated by the server, and will be ignored when sending a request. 
- _validation = { - 'name': {'required': True}, - 'fields': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, - 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, - 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, - 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, - 'analyzers': {'key': 'analyzers', 'type': '[Analyzer]'}, - 'tokenizers': {'key': 'tokenizers', 'type': '[Tokenizer]'}, - 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, - 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - fields: List["Field"], - scoring_profiles: Optional[List["ScoringProfile"]] = None, - default_scoring_profile: Optional[str] = None, - cors_options: Optional["CorsOptions"] = None, - suggesters: Optional[List["Suggester"]] = None, - analyzers: Optional[List["Analyzer"]] = None, - tokenizers: Optional[List["Tokenizer"]] = None, - token_filters: Optional[List["TokenFilter"]] = None, - char_filters: Optional[List["CharFilter"]] = None, - encryption_key: Optional["EncryptionKey"] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(Index, self).__init__(**kwargs) - self.name = name - self.fields = fields - self.scoring_profiles = scoring_profiles - self.default_scoring_profile = default_scoring_profile - self.cors_options = cors_options - self.suggesters = suggesters - self.analyzers = analyzers - self.tokenizers = tokenizers - self.token_filters = token_filters - self.char_filters = char_filters - self.encryption_key = encryption_key - self.e_tag = e_tag - - -class Indexer(msrest.serialization.Model): - """Represents an indexer. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the indexer. - :type name: str - :param description: The description of the indexer. - :type description: str - :param data_source_name: Required. The name of the datasource from which this indexer reads - data. - :type data_source_name: str - :param skillset_name: The name of the skillset executing with this indexer. - :type skillset_name: str - :param target_index_name: Required. The name of the index to which this indexer writes data. - :type target_index_name: str - :param schedule: The schedule for this indexer. - :type schedule: ~search_service_client.models.IndexingSchedule - :param parameters: Parameters for indexer execution. - :type parameters: ~search_service_client.models.IndexingParameters - :param field_mappings: Defines mappings between fields in the data source and corresponding - target fields in the index. - :type field_mappings: list[~search_service_client.models.FieldMapping] - :param output_field_mappings: Output field mappings are applied after enrichment and - immediately before indexing. - :type output_field_mappings: list[~search_service_client.models.FieldMapping] - :param is_disabled: A value indicating whether the indexer is disabled. Default is false. - :type is_disabled: bool - :param e_tag: The ETag of the Indexer. 
- :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'data_source_name': {'required': True}, - 'target_index_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, - 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, - 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, - 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, - 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, - 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, - 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, - 'is_disabled': {'key': 'disabled', 'type': 'bool'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - data_source_name: str, - target_index_name: str, - description: Optional[str] = None, - skillset_name: Optional[str] = None, - schedule: Optional["IndexingSchedule"] = None, - parameters: Optional["IndexingParameters"] = None, - field_mappings: Optional[List["FieldMapping"]] = None, - output_field_mappings: Optional[List["FieldMapping"]] = None, - is_disabled: Optional[bool] = False, - e_tag: Optional[str] = None, - **kwargs - ): - super(Indexer, self).__init__(**kwargs) - self.name = name - self.description = description - self.data_source_name = data_source_name - self.skillset_name = skillset_name - self.target_index_name = target_index_name - self.schedule = schedule - self.parameters = parameters - self.field_mappings = field_mappings - self.output_field_mappings = output_field_mappings - self.is_disabled = is_disabled - self.e_tag = e_tag - - -class IndexerExecutionInfo(msrest.serialization.Model): - """Represents the current status and execution history of an indexer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', - 'running'. - :vartype status: str or ~search_service_client.models.IndexerStatus - :ivar last_result: The result of the most recent or an in-progress indexer execution. - :vartype last_result: ~search_service_client.models.IndexerExecutionResult - :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse - chronological order. - :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] - :ivar limits: Required. The execution limits for the indexer. 
- :vartype limits: ~search_service_client.models.IndexerLimits - """ - - _validation = { - 'status': {'required': True, 'readonly': True}, - 'last_result': {'readonly': True}, - 'execution_history': {'required': True, 'readonly': True}, - 'limits': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'str'}, - 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, - 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, - 'limits': {'key': 'limits', 'type': 'IndexerLimits'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerExecutionInfo, self).__init__(**kwargs) - self.status = None - self.last_result = None - self.execution_history = None - self.limits = None - - -class IndexerExecutionResult(msrest.serialization.Model): - """Represents the result of an individual indexer execution. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. + All required parameters must be populated in order to send to Azure. :ivar status: Required. The outcome of this indexer execution. Possible values include: 'transientFailure', 'success', 'inProgress', 'reset'. @@ -2274,9 +1828,9 @@ class IndexerExecutionResult(msrest.serialization.Model): :ivar end_time: The end time of this indexer execution, if the execution has already completed. :vartype end_time: ~datetime.datetime :ivar errors: Required. The item-level indexing errors. - :vartype errors: list[~search_service_client.models.ItemError] + :vartype errors: list[~search_service_client.models.SearchIndexerError] :ivar warnings: Required. The item-level indexing warnings. - :vartype warnings: list[~search_service_client.models.ItemWarning] + :vartype warnings: list[~search_service_client.models.SearchIndexerWarning] :ivar item_count: Required. The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. @@ -2308,8 +1862,8 @@ class IndexerExecutionResult(msrest.serialization.Model): 'error_message': {'key': 'errorMessage', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'errors': {'key': 'errors', 'type': '[ItemError]'}, - 'warnings': {'key': 'warnings', 'type': '[ItemWarning]'}, + 'errors': {'key': 'errors', 'type': '[SearchIndexerError]'}, + 'warnings': {'key': 'warnings', 'type': '[SearchIndexerWarning]'}, 'item_count': {'key': 'itemsProcessed', 'type': 'int'}, 'failed_item_count': {'key': 'itemsFailed', 'type': 'int'}, 'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'}, @@ -2333,44 +1887,6 @@ def __init__( self.final_tracking_state = None -class IndexerLimits(msrest.serialization.Model): - """IndexerLimits. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar max_run_time: The maximum duration that the indexer is permitted to run for one - execution. - :vartype max_run_time: ~datetime.timedelta - :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be - considered valid for indexing. - :vartype max_document_extraction_size: long - :ivar max_document_content_characters_to_extract: The maximum number of characters that will be - extracted from a document picked up for indexing. 
- :vartype max_document_content_characters_to_extract: long - """ - - _validation = { - 'max_run_time': {'readonly': True}, - 'max_document_extraction_size': {'readonly': True}, - 'max_document_content_characters_to_extract': {'readonly': True}, - } - - _attribute_map = { - 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, - 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, - 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerLimits, self).__init__(**kwargs) - self.max_run_time = None - self.max_document_extraction_size = None - self.max_document_content_characters_to_extract = None - - class IndexingParameters(msrest.serialization.Model): """Represents parameters for indexer execution. @@ -2485,154 +2001,44 @@ def __init__( self.inputs = inputs -class ItemError(msrest.serialization.Model): - """Represents an item- or document-level indexing error. - - Variables are only populated by the server, and will be ignored when sending a request. +class KeepTokenFilter(TokenFilter): + """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :ivar key: The key of the item for which indexing failed. - :vartype key: str - :ivar error_message: Required. The message describing the error that occurred while processing - the item. - :vartype error_message: str - :ivar status_code: Required. The status code indicating why the indexing operation failed. - Possible values include: 400 for a malformed input document, 404 for document not found, 409 - for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the - service is too busy. - :vartype status_code: int - :ivar name: The name of the source at which the error originated. For example, this could refer - to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the error to assist in debugging the indexer. - This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This - may not be always available. - :vartype documentation_link: str + :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled + by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param keep_words: Required. The list of words to keep. + :type keep_words: list[str] + :param lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. 
+ :type lower_case_keep_words: bool """ _validation = { - 'key': {'readonly': True}, - 'error_message': {'required': True, 'readonly': True}, - 'status_code': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'keep_words': {'required': True}, } _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - 'status_code': {'key': 'statusCode', 'type': 'int'}, + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, + 'keep_words': {'key': 'keepWords', 'type': '[str]'}, + 'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'}, } def __init__( self, - **kwargs - ): - super(ItemError, self).__init__(**kwargs) - self.key = None - self.error_message = None - self.status_code = None - self.name = None - self.details = None - self.documentation_link = None - - -class ItemWarning(msrest.serialization.Model): - """Represents an item-level warning. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar key: The key of the item which generated a warning. - :vartype key: str - :ivar message: Required. The message describing the warning that occurred while processing the - item. - :vartype message: str - :ivar name: The name of the source at which the warning originated. For example, this could - refer to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the warning to assist in debugging the - indexer. This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This - may not be always available. - :vartype documentation_link: str - """ - - _validation = { - 'key': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, - } - - _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ItemWarning, self).__init__(**kwargs) - self.key = None - self.message = None - self.name = None - self.details = None - self.documentation_link = None - - -class KeepTokenFilter(TokenFilter): - """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :type odata_type: str - :param name: Required. The name of the token filter. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :type name: str - :param keep_words: Required. The list of words to keep. 
- :type keep_words: list[str] - :param lower_case_keep_words: A value indicating whether to lower case all words first. Default - is false. - :type lower_case_keep_words: bool - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'keep_words': {'required': True}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'keep_words': {'key': 'keepWords', 'type': '[str]'}, - 'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'}, - } - - def __init__( - self, - *, - name: str, - keep_words: List[str], - lower_case_keep_words: Optional[bool] = False, + *, + name: str, + keep_words: List[str], + lower_case_keep_words: Optional[bool] = False, **kwargs ): super(KeepTokenFilter, self).__init__(name=name, **kwargs) @@ -2641,7 +2047,7 @@ def __init__( self.lower_case_keep_words = lower_case_keep_words -class KeyPhraseExtractionSkill(Skill): +class KeyPhraseExtractionSkill(SearchIndexerSkill): """A skill that uses text analytics for key phrase extraction. All required parameters must be populated in order to send to Azure. @@ -2756,7 +2162,7 @@ def __init__( self.ignore_case = ignore_case -class KeywordTokenizer(Tokenizer): +class KeywordTokenizer(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2795,7 +2201,7 @@ def __init__( self.buffer_size = buffer_size -class KeywordTokenizerV2(Tokenizer): +class KeywordTokenizerV2(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2836,7 +2242,7 @@ def __init__( self.max_token_length = max_token_length -class LanguageDetectionSkill(Skill): +class LanguageDetectionSkill(SearchIndexerSkill): """A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. All required parameters must be populated in order to send to Azure. @@ -2903,39 +2309,39 @@ class LengthTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param min: The minimum length in characters. Default is 0. Maximum is 300. Must be less than - the value of max. - :type min: int - :param max: The maximum length in characters. Default and maximum is 300. - :type max: int + :param min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max. + :type min_length: int + :param max_length: The maximum length in characters. Default and maximum is 300. 
+ :type max_length: int """ _validation = { 'odata_type': {'required': True}, 'name': {'required': True}, - 'min': {'maximum': 300}, - 'max': {'maximum': 300}, + 'min_length': {'maximum': 300}, + 'max_length': {'maximum': 300}, } _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'min': {'key': 'min', 'type': 'int'}, - 'max': {'key': 'max', 'type': 'int'}, + 'min_length': {'key': 'min', 'type': 'int'}, + 'max_length': {'key': 'max', 'type': 'int'}, } def __init__( self, *, name: str, - min: Optional[int] = 0, - max: Optional[int] = 300, + min_length: Optional[int] = 0, + max_length: Optional[int] = 300, **kwargs ): super(LengthTokenFilter, self).__init__(name=name, **kwargs) self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' - self.min = min - self.max = max + self.min_length = min_length + self.max_length = max_length class LimitTokenFilter(TokenFilter): @@ -2991,7 +2397,7 @@ class ListDataSourcesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar data_sources: Required. The datasources in the Search service. - :vartype data_sources: list[~search_service_client.models.DataSource] + :vartype data_sources: list[~search_service_client.models.SearchIndexerDataSource] """ _validation = { @@ -2999,7 +2405,7 @@ class ListDataSourcesResult(msrest.serialization.Model): } _attribute_map = { - 'data_sources': {'key': 'value', 'type': '[DataSource]'}, + 'data_sources': {'key': 'value', 'type': '[SearchIndexerDataSource]'}, } def __init__( @@ -3018,7 +2424,7 @@ class ListIndexersResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexers: Required. The indexers in the Search service. - :vartype indexers: list[~search_service_client.models.Indexer] + :vartype indexers: list[~search_service_client.models.SearchIndexer] """ _validation = { @@ -3026,7 +2432,7 @@ class ListIndexersResult(msrest.serialization.Model): } _attribute_map = { - 'indexers': {'key': 'value', 'type': '[Indexer]'}, + 'indexers': {'key': 'value', 'type': '[SearchIndexer]'}, } def __init__( @@ -3045,7 +2451,7 @@ class ListIndexesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexes: Required. The indexes in the Search service. - :vartype indexes: list[~search_service_client.models.Index] + :vartype indexes: list[~search_service_client.models.SearchIndex] """ _validation = { @@ -3053,7 +2459,7 @@ class ListIndexesResult(msrest.serialization.Model): } _attribute_map = { - 'indexes': {'key': 'value', 'type': '[Index]'}, + 'indexes': {'key': 'value', 'type': '[SearchIndex]'}, } def __init__( @@ -3065,14 +2471,14 @@ def __init__( class ListSkillsetsResult(msrest.serialization.Model): - """Response from a list Skillset request. If successful, it includes the full definitions of all skillsets. + """Response from a list skillset request. If successful, it includes the full definitions of all skillsets. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar skillsets: Required. The skillsets defined in the Search service. 
- :vartype skillsets: list[~search_service_client.models.Skillset] + :vartype skillsets: list[~search_service_client.models.SearchIndexerSkillset] """ _validation = { @@ -3080,7 +2486,7 @@ class ListSkillsetsResult(msrest.serialization.Model): } _attribute_map = { - 'skillsets': {'key': 'value', 'type': '[Skillset]'}, + 'skillsets': {'key': 'value', 'type': '[SearchIndexerSkillset]'}, } def __init__( @@ -3118,6 +2524,133 @@ def __init__( self.synonym_maps = None +class LuceneStandardAnalyzer(LexicalAnalyzer): + """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + :param stopwords: A list of stopwords. + :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + stopwords: Optional[List[str]] = None, + **kwargs + ): + super(LuceneStandardAnalyzer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' + self.max_token_length = max_token_length + self.stopwords = stopwords + + +class LuceneStandardTokenizer(LexicalTokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + **kwargs + ): + super(LuceneStandardTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' + self.max_token_length = max_token_length + + +class LuceneStandardTokenizerV2(LexicalTokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. 
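# Illustrative sketch (not part of the generated diff): the standard analyzer
# under its new LuceneStandardAnalyzer Python name. Only the client-side class
# name changed; the OData discriminator written to the payload is still
# '#Microsoft.Azure.Search.StandardAnalyzer'.
from azure.search.documents._service._generated.models import LuceneStandardAnalyzer

analyzer = LuceneStandardAnalyzer(
    name="my_standard",
    max_token_length=255,            # tokens longer than this are split
    stopwords=["the", "and", "is"],
)
assert analyzer.odata_type == '#Microsoft.Azure.Search.StandardAnalyzer'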
+ + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + **kwargs + ): + super(LuceneStandardTokenizerV2, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' + self.max_token_length = max_token_length + + class MagnitudeScoringFunction(ScoringFunction): """Defines a function that boosts scores based on the magnitude of a numeric field. @@ -3248,7 +2781,7 @@ def __init__( self.mappings = mappings -class MergeSkill(Skill): +class MergeSkill(SearchIndexerSkill): """A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. All required parameters must be populated in order to send to Azure. @@ -3315,7 +2848,7 @@ def __init__( self.insert_post_tag = insert_post_tag -class MicrosoftLanguageStemmingTokenizer(Tokenizer): +class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): """Divides text using language-specific rules and reduces words to their base forms. All required parameters must be populated in order to send to Azure. @@ -3375,7 +2908,7 @@ def __init__( self.language = language -class MicrosoftLanguageTokenizer(Tokenizer): +class MicrosoftLanguageTokenizer(LexicalTokenizer): """Divides text using language-specific rules. All required parameters must be populated in order to send to Azure. @@ -3527,7 +3060,7 @@ def __init__( self.max_gram = max_gram -class NGramTokenizer(Tokenizer): +class NGramTokenizer(LexicalTokenizer): """Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3579,7 +3112,7 @@ def __init__( self.token_chars = token_chars -class OcrSkill(Skill): +class OcrSkill(SearchIndexerSkill): """A skill that extracts text from image files. All required parameters must be populated in order to send to Azure. @@ -3686,7 +3219,7 @@ def __init__( self.target_name = target_name -class PathHierarchyTokenizerV2(Tokenizer): +class PathHierarchyTokenizerV2(LexicalTokenizer): """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3747,7 +3280,7 @@ def __init__( self.number_of_tokens_to_skip = number_of_tokens_to_skip -class PatternAnalyzer(Analyzer): +class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. 
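# Illustrative sketch (not part of the generated diff): the corrected
# PatternAnalyzer docstring below describes the default separator pattern as
# "one or more non-word characters", i.e. the regex \W+ rather than a whitespace
# expression. Plain re.split shows what that default separates on:
import re

assert re.split(r"\W+", "sunny-day,2020") == ["sunny", "day", "2020"]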
All required parameters must be populated in order to send to Azure. @@ -3763,7 +3296,7 @@ class PatternAnalyzer(Analyzer): true. :type lower_case_terms: bool :param pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more whitespace characters. + expression that matches one or more non-word characters. :type pattern: str :param flags: Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. @@ -3942,7 +3475,7 @@ def __init__( self.replacement = replacement -class PatternTokenizer(Tokenizer): +class PatternTokenizer(LexicalTokenizer): """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3955,7 +3488,7 @@ class PatternTokenizer(Tokenizer): 128 characters. :type name: str :param pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more whitespace characters. + expression that matches one or more non-word characters. :type pattern: str :param flags: Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. @@ -4126,58 +3659,824 @@ class ScoringProfile(msrest.serialization.Model): def __init__( self, - *, - name: str, - text_weights: Optional["TextWeights"] = None, - functions: Optional[List["ScoringFunction"]] = None, - function_aggregation: Optional[Union[str, "ScoringFunctionAggregation"]] = None, + *, + name: str, + text_weights: Optional["TextWeights"] = None, + functions: Optional[List["ScoringFunction"]] = None, + function_aggregation: Optional[Union[str, "ScoringFunctionAggregation"]] = None, + **kwargs + ): + super(ScoringProfile, self).__init__(**kwargs) + self.name = name + self.text_weights = text_weights + self.functions = functions + self.function_aggregation = function_aggregation + + +class SearchError(msrest.serialization.Model): + """Describes an error condition for the Azure Cognitive Search API. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar code: One of a server-defined set of error codes. + :vartype code: str + :ivar message: Required. A human-readable representation of the error. + :vartype message: str + :ivar details: An array of details about specific errors that led to this reported error. + :vartype details: list[~search_service_client.models.SearchError] + """ + + _validation = { + 'code': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[SearchError]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchError, self).__init__(**kwargs) + self.code = None + self.message = None + self.details = None + + +class SearchField(msrest.serialization.Model): + """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
The name of the field, which must be unique within the fields collection + of the index or parent field. + :type name: str + :param type: Required. The data type of the field. Possible values include: 'Edm.String', + 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', + 'Edm.GeographyPoint', 'Edm.ComplexType'. + :type type: str or ~search_service_client.models.SearchFieldDataType + :param key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :type key: bool + :param retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :type retrievable: bool + :param searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :type searchable: bool + :param filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. + :type filterable: bool + :param sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. 
Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :type sortable: bool + :param facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :type facetable: bool + :param analyzer: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', + 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- + Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', + 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', + 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', + 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', + 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', + 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', + 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', + 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', + 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- + PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', + 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', + 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', + 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', + 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', + 'simple', 'stop', 'whitespace'. + :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName + :param search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. This + analyzer can be updated on an existing field. Must be null for complex fields. 
Possible values + include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', + 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- + Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', + 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', + 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', + 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', + 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', + 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', + 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', + 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', + 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', + 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- + cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', + 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', + 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', + 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', + 'whitespace'. + :type search_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + :param index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', + 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- + Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', + 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', + 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', + 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', + 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', + 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', + 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', + 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', + 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', + 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- + cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', + 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', + 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', + 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', + 'whitespace'. 
+ :type index_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + :param synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :type synonym_maps: list[str] + :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :type fields: list[~search_service_client.models.SearchField] + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'bool'}, + 'retrievable': {'key': 'retrievable', 'type': 'bool'}, + 'searchable': {'key': 'searchable', 'type': 'bool'}, + 'filterable': {'key': 'filterable', 'type': 'bool'}, + 'sortable': {'key': 'sortable', 'type': 'bool'}, + 'facetable': {'key': 'facetable', 'type': 'bool'}, + 'analyzer': {'key': 'analyzer', 'type': 'str'}, + 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, + 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, + 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "SearchFieldDataType"], + key: Optional[bool] = None, + retrievable: Optional[bool] = None, + searchable: Optional[bool] = None, + filterable: Optional[bool] = None, + sortable: Optional[bool] = None, + facetable: Optional[bool] = None, + analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + search_analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + index_analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + synonym_maps: Optional[List[str]] = None, + fields: Optional[List["SearchField"]] = None, + **kwargs + ): + super(SearchField, self).__init__(**kwargs) + self.name = name + self.type = type + self.key = key + self.retrievable = retrievable + self.searchable = searchable + self.filterable = filterable + self.sortable = sortable + self.facetable = facetable + self.analyzer = analyzer + self.search_analyzer = search_analyzer + self.index_analyzer = index_analyzer + self.synonym_maps = synonym_maps + self.fields = fields + + +class SearchIndex(msrest.serialization.Model): + """Represents a search index definition, which describes the fields and search behavior of an index. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the index. + :type name: str + :param fields: Required. The fields of the index. + :type fields: list[~search_service_client.models.SearchField] + :param scoring_profiles: The scoring profiles for the index. + :type scoring_profiles: list[~search_service_client.models.ScoringProfile] + :param default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :type default_scoring_profile: str + :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. 
+ :type cors_options: ~search_service_client.models.CorsOptions + :param suggesters: The suggesters for the index. + :type suggesters: list[~search_service_client.models.Suggester] + :param analyzers: The analyzers for the index. + :type analyzers: list[~search_service_client.models.LexicalAnalyzer] + :param tokenizers: The tokenizers for the index. + :type tokenizers: list[~search_service_client.models.LexicalTokenizer] + :param token_filters: The token filters for the index. + :type token_filters: list[~search_service_client.models.TokenFilter] + :param char_filters: The character filters for the index. + :type char_filters: list[~search_service_client.models.CharFilter] + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey + :param similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined at index + creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity + algorithm is used. + :type similarity: ~search_service_client.models.Similarity + :param e_tag: The ETag of the index. 
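# Illustrative sketch (not part of the generated diff): a minimal SearchIndex
# needs only 'name' and 'fields'; everything else above is optional. The e_tag
# round-trips through the '@odata.etag' JSON property per the _attribute_map
# that follows.
from azure.search.documents._service._generated.models import SearchField, SearchIndex

index = SearchIndex(
    name="hotels",
    fields=[
        SearchField(name="hotelId", type="Edm.String", key=True),
        SearchField(name="description", type="Edm.String", searchable=True),
    ],
)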
+ :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, + 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, + 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, + 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'analyzers': {'key': 'analyzers', 'type': '[LexicalAnalyzer]'}, + 'tokenizers': {'key': 'tokenizers', 'type': '[LexicalTokenizer]'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, + 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, + 'similarity': {'key': 'similarity', 'type': 'Similarity'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + fields: List["SearchField"], + scoring_profiles: Optional[List["ScoringProfile"]] = None, + default_scoring_profile: Optional[str] = None, + cors_options: Optional["CorsOptions"] = None, + suggesters: Optional[List["Suggester"]] = None, + analyzers: Optional[List["LexicalAnalyzer"]] = None, + tokenizers: Optional[List["LexicalTokenizer"]] = None, + token_filters: Optional[List["TokenFilter"]] = None, + char_filters: Optional[List["CharFilter"]] = None, + encryption_key: Optional["SearchResourceEncryptionKey"] = None, + similarity: Optional["Similarity"] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(SearchIndex, self).__init__(**kwargs) + self.name = name + self.fields = fields + self.scoring_profiles = scoring_profiles + self.default_scoring_profile = default_scoring_profile + self.cors_options = cors_options + self.suggesters = suggesters + self.analyzers = analyzers + self.tokenizers = tokenizers + self.token_filters = token_filters + self.char_filters = char_filters + self.encryption_key = encryption_key + self.similarity = similarity + self.e_tag = e_tag + + +class SearchIndexer(msrest.serialization.Model): + """Represents an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the indexer. + :type name: str + :param description: The description of the indexer. + :type description: str + :param data_source_name: Required. The name of the datasource from which this indexer reads + data. + :type data_source_name: str + :param skillset_name: The name of the skillset executing with this indexer. + :type skillset_name: str + :param target_index_name: Required. The name of the index to which this indexer writes data. + :type target_index_name: str + :param schedule: The schedule for this indexer. + :type schedule: ~search_service_client.models.IndexingSchedule + :param parameters: Parameters for indexer execution. + :type parameters: ~search_service_client.models.IndexingParameters + :param field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :type field_mappings: list[~search_service_client.models.FieldMapping] + :param output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :type output_field_mappings: list[~search_service_client.models.FieldMapping] + :param is_disabled: A value indicating whether the indexer is disabled. Default is false. 
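# Illustrative sketch (not part of the generated diff): a SearchIndexer only
# requires its own name plus the names of the datasource it reads and the index
# it writes; the referenced resources are hypothetical. Note below that
# is_disabled is serialized under the 'disabled' JSON key.
from azure.search.documents._service._generated.models import SearchIndexer

indexer = SearchIndexer(
    name="hotels-indexer",
    data_source_name="hotels-datasource",
    target_index_name="hotels",
)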
+ :type is_disabled: bool + :param e_tag: The ETag of the indexer. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'data_source_name': {'required': True}, + 'target_index_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, + 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, + 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, + 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, + 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, + 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, + 'is_disabled': {'key': 'disabled', 'type': 'bool'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + data_source_name: str, + target_index_name: str, + description: Optional[str] = None, + skillset_name: Optional[str] = None, + schedule: Optional["IndexingSchedule"] = None, + parameters: Optional["IndexingParameters"] = None, + field_mappings: Optional[List["FieldMapping"]] = None, + output_field_mappings: Optional[List["FieldMapping"]] = None, + is_disabled: Optional[bool] = False, + e_tag: Optional[str] = None, + **kwargs + ): + super(SearchIndexer, self).__init__(**kwargs) + self.name = name + self.description = description + self.data_source_name = data_source_name + self.skillset_name = skillset_name + self.target_index_name = target_index_name + self.schedule = schedule + self.parameters = parameters + self.field_mappings = field_mappings + self.output_field_mappings = output_field_mappings + self.is_disabled = is_disabled + self.e_tag = e_tag + + +class SearchIndexerDataContainer(msrest.serialization.Model): + """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. + :type name: str + :param query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. + :type query: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'query': {'key': 'query', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + query: Optional[str] = None, + **kwargs + ): + super(SearchIndexerDataContainer, self).__init__(**kwargs) + self.name = name + self.query = query + + +class SearchIndexerDataSource(msrest.serialization.Model): + """Represents a datasource definition, which can be used to configure an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the datasource. + :type name: str + :param description: The description of the datasource. + :type description: str + :param type: Required. The type of the datasource. Possible values include: 'azuresql', + 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. + :type type: str or ~search_service_client.models.SearchIndexerDataSourceType + :param credentials: Required. Credentials for the datasource. 
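# Illustrative sketch (not part of the generated diff): a blob datasource built
# from the required name, type, credentials and container. Assumes the generated
# DataSourceCredentials model takes a 'connection_string' keyword; the
# connection string value is a placeholder.
from azure.search.documents._service._generated.models import (
    DataSourceCredentials, SearchIndexerDataContainer, SearchIndexerDataSource)

data_source = SearchIndexerDataSource(
    name="hotels-datasource",
    type="azureblob",
    credentials=DataSourceCredentials(connection_string="REDACTED-CONNECTION-STRING"),
    container=SearchIndexerDataContainer(name="hotel-docs"),
)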
+    :type credentials: ~search_service_client.models.DataSourceCredentials
+    :param container: Required. The data container for the datasource.
+    :type container: ~search_service_client.models.SearchIndexerDataContainer
+    :param data_change_detection_policy: The data change detection policy for the datasource.
+    :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy
+    :param data_deletion_detection_policy: The data deletion detection policy for the datasource.
+    :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy
+    :param e_tag: The ETag of the data source.
+    :type e_tag: str
+    """
+
+    _validation = {
+        'name': {'required': True},
+        'type': {'required': True},
+        'credentials': {'required': True},
+        'container': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'description': {'key': 'description', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+        'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'},
+        'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'},
+        'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'},
+        'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'},
+        'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        type: Union[str, "SearchIndexerDataSourceType"],
+        credentials: "DataSourceCredentials",
+        container: "SearchIndexerDataContainer",
+        description: Optional[str] = None,
+        data_change_detection_policy: Optional["DataChangeDetectionPolicy"] = None,
+        data_deletion_detection_policy: Optional["DataDeletionDetectionPolicy"] = None,
+        e_tag: Optional[str] = None,
+        **kwargs
+    ):
+        super(SearchIndexerDataSource, self).__init__(**kwargs)
+        self.name = name
+        self.description = description
+        self.type = type
+        self.credentials = credentials
+        self.container = container
+        self.data_change_detection_policy = data_change_detection_policy
+        self.data_deletion_detection_policy = data_deletion_detection_policy
+        self.e_tag = e_tag
+
+
+class SearchIndexerError(msrest.serialization.Model):
+    """Represents an item- or document-level indexing error.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar key: The key of the item for which indexing failed.
+    :vartype key: str
+    :ivar error_message: Required. The message describing the error that occurred while processing
+     the item.
+    :vartype error_message: str
+    :ivar status_code: Required. The status code indicating why the indexing operation failed.
+     Possible values include: 400 for a malformed input document, 404 for document not found, 409
+     for a version conflict, 422 when the index is temporarily unavailable, or 503 when the
+     service is too busy.
+    :vartype status_code: int
+    :ivar name: The name of the source at which the error originated. For example, this could refer
+     to a particular skill in the attached skillset. This may not always be available.
+    :vartype name: str
+    :ivar details: Additional, verbose details about the error to assist in debugging the indexer.
+     This may not always be available.
+    :vartype details: str
+    :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This
+     may not always be available.
+ :vartype documentation_link: str + """ + + _validation = { + 'key': {'readonly': True}, + 'error_message': {'required': True, 'readonly': True}, + 'status_code': {'required': True, 'readonly': True}, + 'name': {'readonly': True}, + 'details': {'readonly': True}, + 'documentation_link': {'readonly': True}, + } + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + 'status_code': {'key': 'statusCode', 'type': 'int'}, + 'name': {'key': 'name', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerError, self).__init__(**kwargs) + self.key = None + self.error_message = None + self.status_code = None + self.name = None + self.details = None + self.documentation_link = None + + +class SearchIndexerLimits(msrest.serialization.Model): + """SearchIndexerLimits. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar max_run_time: The maximum duration that the indexer is permitted to run for one + execution. + :vartype max_run_time: ~datetime.timedelta + :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be + considered valid for indexing. + :vartype max_document_extraction_size: long + :ivar max_document_content_characters_to_extract: The maximum number of characters that will be + extracted from a document picked up for indexing. + :vartype max_document_content_characters_to_extract: long + """ + + _validation = { + 'max_run_time': {'readonly': True}, + 'max_document_extraction_size': {'readonly': True}, + 'max_document_content_characters_to_extract': {'readonly': True}, + } + + _attribute_map = { + 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, + 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, + 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerLimits, self).__init__(**kwargs) + self.max_run_time = None + self.max_document_extraction_size = None + self.max_document_content_characters_to_extract = None + + +class SearchIndexerSkillset(msrest.serialization.Model): + """A list of skills. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the skillset. + :type name: str + :param description: Required. The description of the skillset. + :type description: str + :param skills: Required. A list of skills in the skillset. + :type skills: list[~search_service_client.models.SearchIndexerSkill] + :param cognitive_services_account: Details about cognitive services to be used when running + skills. + :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount + :param e_tag: The ETag of the skillset. 
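# Illustrative sketch (not part of the generated diff): a one-skill
# SearchIndexerSkillset. Assumes the generated InputFieldMappingEntry and
# OutputFieldMappingEntry models for wiring skill inputs and outputs; names and
# document paths are hypothetical.
from azure.search.documents._service._generated.models import (
    InputFieldMappingEntry, KeyPhraseExtractionSkill, OutputFieldMappingEntry,
    SearchIndexerSkillset)

skillset = SearchIndexerSkillset(
    name="hotels-skillset",
    description="Extracts key phrases from hotel descriptions.",
    skills=[
        KeyPhraseExtractionSkill(
            inputs=[InputFieldMappingEntry(name="text", source="/document/description")],
            outputs=[OutputFieldMappingEntry(name="keyPhrases", target_name="keyphrases")],
        ),
    ],
)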
+    :type e_tag: str
+    """
+
+    _validation = {
+        'name': {'required': True},
+        'description': {'required': True},
+        'skills': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'description': {'key': 'description', 'type': 'str'},
+        'skills': {'key': 'skills', 'type': '[SearchIndexerSkill]'},
+        'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'},
+        'e_tag': {'key': '@odata\\.etag', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        description: str,
+        skills: List["SearchIndexerSkill"],
+        cognitive_services_account: Optional["CognitiveServicesAccount"] = None,
+        e_tag: Optional[str] = None,
+        **kwargs
+    ):
+        super(SearchIndexerSkillset, self).__init__(**kwargs)
+        self.name = name
+        self.description = description
+        self.skills = skills
+        self.cognitive_services_account = cognitive_services_account
+        self.e_tag = e_tag
+
+
+class SearchIndexerStatus(msrest.serialization.Model):
+    """Represents the current status and execution history of an indexer.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error',
+     'running'.
+    :vartype status: str or ~search_service_client.models.IndexerStatus
+    :ivar last_result: The result of the most recent or an in-progress indexer execution.
+    :vartype last_result: ~search_service_client.models.IndexerExecutionResult
+    :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse
+     chronological order.
+    :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult]
+    :ivar limits: Required. The execution limits for the indexer.
+    :vartype limits: ~search_service_client.models.SearchIndexerLimits
+    """
+
+    _validation = {
+        'status': {'required': True, 'readonly': True},
+        'last_result': {'readonly': True},
+        'execution_history': {'required': True, 'readonly': True},
+        'limits': {'required': True, 'readonly': True},
+    }
+
+    _attribute_map = {
+        'status': {'key': 'status', 'type': 'str'},
+        'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'},
+        'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'},
+        'limits': {'key': 'limits', 'type': 'SearchIndexerLimits'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(SearchIndexerStatus, self).__init__(**kwargs)
+        self.status = None
+        self.last_result = None
+        self.execution_history = None
+        self.limits = None
+
+
+class SearchIndexerWarning(msrest.serialization.Model):
+    """Represents an item-level warning.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar key: The key of the item which generated a warning.
+    :vartype key: str
+    :ivar message: Required. The message describing the warning that occurred while processing the
+     item.
+    :vartype message: str
+    :ivar name: The name of the source at which the warning originated. For example, this could
+     refer to a particular skill in the attached skillset. This may not always be available.
+    :vartype name: str
+    :ivar details: Additional, verbose details about the warning to assist in debugging the
+     indexer. This may not always be available.
+    :vartype details: str
+    :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This
+     may not always be available.
+    :vartype documentation_link: str
+    """
+
+    _validation = {
+        'key': {'readonly': True},
+        'message': {'required': True, 'readonly': True},
+        'name': {'readonly': True},
+        'details': {'readonly': True},
+        'documentation_link': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'key': {'key': 'key', 'type': 'str'},
+        'message': {'key': 'message', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'details': {'key': 'details', 'type': 'str'},
+        'documentation_link': {'key': 'documentationLink', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
         **kwargs
     ):
-        super(ScoringProfile, self).__init__(**kwargs)
-        self.name = name
-        self.text_weights = text_weights
-        self.functions = functions
-        self.function_aggregation = function_aggregation
-
+        super(SearchIndexerWarning, self).__init__(**kwargs)
+        self.key = None
+        self.message = None
+        self.name = None
+        self.details = None
+        self.documentation_link = None

-class SearchError(msrest.serialization.Model):
-    """Describes an error condition for the Azure Cognitive Search API.

-    Variables are only populated by the server, and will be ignored when sending a request.
+class SearchResourceEncryptionKey(msrest.serialization.Model):
+    """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.

     All required parameters must be populated in order to send to Azure.

-    :ivar code: One of a server-defined set of error codes.
-    :vartype code: str
-    :ivar message: Required. A human-readable representation of the error.
-    :vartype message: str
-    :ivar details: An array of details about specific errors that led to this reported error.
-    :vartype details: list[~search_service_client.models.SearchError]
+    :param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data
+     at rest.
+    :type key_name: str
+    :param key_version: Required. The version of your Azure Key Vault key to be used to encrypt
+     your data at rest.
+    :type key_version: str
+    :param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that
+     contains the key to be used to encrypt your data at rest. An example URI might be https://my-
+     keyvault-name.vault.azure.net.
+    :type vault_uri: str
+    :param access_credentials: Optional Azure Active Directory credentials used for accessing your
+     Azure Key Vault. Not required if using managed identity instead.
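# Illustrative sketch (not part of the generated diff): a customer-managed key
# reference with the three required values, which serialize to keyVaultKeyName,
# keyVaultKeyVersion and keyVaultUri. The vault, key name and version are
# placeholders.
from azure.search.documents._service._generated.models import SearchResourceEncryptionKey

encryption_key = SearchResourceEncryptionKey(
    key_name="my-search-cmk",
    key_version="key-version-placeholder",
    vault_uri="https://my-keyvault-name.vault.azure.net",
)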
+ :type access_credentials: + ~search_service_client.models.AzureActiveDirectoryApplicationCredentials """ _validation = { - 'code': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'details': {'readonly': True}, + 'key_name': {'required': True}, + 'key_version': {'required': True}, + 'vault_uri': {'required': True}, } _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[SearchError]'}, + 'key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, + 'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, + 'vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, + 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, } def __init__( self, + *, + key_name: str, + key_version: str, + vault_uri: str, + access_credentials: Optional["AzureActiveDirectoryApplicationCredentials"] = None, **kwargs ): - super(SearchError, self).__init__(**kwargs) - self.code = None - self.message = None - self.details = None + super(SearchResourceEncryptionKey, self).__init__(**kwargs) + self.key_name = key_name + self.key_version = key_version + self.vault_uri = vault_uri + self.access_credentials = access_credentials -class SentimentSkill(Skill): +class SentimentSkill(SearchIndexerSkill): """Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. All required parameters must be populated in order to send to Azure. @@ -4374,7 +4673,7 @@ def __init__( self.limits = limits -class ShaperSkill(Skill): +class ShaperSkill(SearchIndexerSkill): """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). All required parameters must be populated in order to send to Azure. @@ -4501,56 +4800,6 @@ def __init__( self.filter_token = filter_token -class Skillset(msrest.serialization.Model): - """A list of skills. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the skillset. - :type name: str - :param description: Required. The description of the skillset. - :type description: str - :param skills: Required. A list of skills in the skillset. - :type skills: list[~search_service_client.models.Skill] - :param cognitive_services_account: Details about cognitive services to be used when running - skills. - :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount - :param e_tag: The ETag of the skillset. 
- :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'description': {'required': True}, - 'skills': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'skills': {'key': 'skills', 'type': '[Skill]'}, - 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - description: str, - skills: List["Skill"], - cognitive_services_account: Optional["CognitiveServicesAccount"] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(Skillset, self).__init__(**kwargs) - self.name = name - self.description = description - self.skills = skills - self.cognitive_services_account = cognitive_services_account - self.e_tag = e_tag - - class SnowballTokenFilter(TokenFilter): """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. @@ -4631,7 +4880,7 @@ def __init__( self.soft_delete_marker_value = soft_delete_marker_value -class SplitSkill(Skill): +class SplitSkill(SearchIndexerSkill): """A skill to split a string into chunks of text. All required parameters must be populated in order to send to Azure. @@ -4729,133 +4978,6 @@ def __init__( self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' -class StandardAnalyzer(Analyzer): - """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :type max_token_length: int - :param stopwords: A list of stopwords. - :type stopwords: list[str] - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - 'stopwords': {'key': 'stopwords', 'type': '[str]'}, - } - - def __init__( - self, - *, - name: str, - max_token_length: Optional[int] = 255, - stopwords: Optional[List[str]] = None, - **kwargs - ): - super(StandardAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' - self.max_token_length = max_token_length - self.stopwords = stopwords - - -class StandardTokenizer(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. 
It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. - :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - *, - name: str, - max_token_length: Optional[int] = 255, - **kwargs - ): - super(StandardTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' - self.max_token_length = max_token_length - - -class StandardTokenizerV2(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - *, - name: str, - max_token_length: Optional[int] = 255, - **kwargs - ): - super(StandardTokenizerV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' - self.max_token_length = max_token_length - - class StemmerOverrideTokenFilter(TokenFilter): """Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. @@ -4945,7 +5067,7 @@ def __init__( self.language = language -class StopAnalyzer(Analyzer): +class StopAnalyzer(LexicalAnalyzer): """Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -5112,7 +5234,7 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey + :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey :param e_tag: The ETag of the synonym map. 
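# Illustrative sketch (not part of the generated diff): a SynonymMap whose
# optional encryption key now uses the renamed SearchResourceEncryptionKey type.
# The synonyms are passed as a single newline-delimited string; the 'format'
# property is pinned by the generated model (to the Solr format) rather than
# taken as a constructor argument.
from azure.search.documents._service._generated.models import SynonymMap

synonym_map = SynonymMap(
    name="hotel-synonyms",
    synonyms="USA, United States, United States of America\nhotel, motel",
)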
:type e_tag: str """ @@ -5127,7 +5249,7 @@ class SynonymMap(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, 'format': {'key': 'format', 'type': 'str'}, 'synonyms': {'key': 'synonyms', 'type': 'str'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, } @@ -5138,7 +5260,7 @@ def __init__( *, name: str, synonyms: str, - encryption_key: Optional["EncryptionKey"] = None, + encryption_key: Optional["SearchResourceEncryptionKey"] = None, e_tag: Optional[str] = None, **kwargs ): @@ -5288,7 +5410,7 @@ def __init__( self.tags_parameter = tags_parameter -class TextTranslationSkill(Skill): +class TextTranslationSkill(SearchIndexerSkill): """A skill to translate text from one language to another. All required parameters must be populated in order to send to Azure. @@ -5408,51 +5530,6 @@ def __init__( self.weights = weights -class TokenInfo(msrest.serialization.Model): - """Information about a token returned by an analyzer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar token: Required. The token returned by the analyzer. - :vartype token: str - :ivar start_offset: Required. The index of the first character of the token in the input text. - :vartype start_offset: int - :ivar end_offset: Required. The index of the last character of the token in the input text. - :vartype end_offset: int - :ivar position: Required. The position of the token in the input text relative to other tokens. - The first token in the input text has position 0, the next has position 1, and so on. Depending - on the analyzer used, some tokens might have the same position, for example if they are - synonyms of each other. - :vartype position: int - """ - - _validation = { - 'token': {'required': True, 'readonly': True}, - 'start_offset': {'required': True, 'readonly': True}, - 'end_offset': {'required': True, 'readonly': True}, - 'position': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'token': {'key': 'token', 'type': 'str'}, - 'start_offset': {'key': 'startOffset', 'type': 'int'}, - 'end_offset': {'key': 'endOffset', 'type': 'int'}, - 'position': {'key': 'position', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(TokenInfo, self).__init__(**kwargs) - self.token = None - self.start_offset = None - self.end_offset = None - self.position = None - - class TruncateTokenFilter(TokenFilter): """Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. @@ -5493,7 +5570,7 @@ def __init__( self.length = length -class UaxUrlEmailTokenizer(Tokenizer): +class UaxUrlEmailTokenizer(LexicalTokenizer): """Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -5574,7 +5651,7 @@ def __init__( self.only_on_same_position = only_on_same_position -class WebApiSkill(Skill): +class WebApiSkill(SearchIndexerSkill): """A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. All required parameters must be populated in order to send to Azure. 
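# Illustrative sketch (not part of the generated diff): a custom WebApiSkill,
# which now derives from SearchIndexerSkill like the built-in skills. Assumes
# the generated model's 'uri' and 'http_method' keywords; the endpoint and the
# field mappings are placeholders.
from azure.search.documents._service._generated.models import (
    InputFieldMappingEntry, OutputFieldMappingEntry, WebApiSkill)

custom_skill = WebApiSkill(
    uri="https://example.com/api/enrich",
    http_method="POST",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="result", target_name="enriched")],
)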
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py
index 2b17813411e2..c96ed67d5b50 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py
@@ -6,39 +6,39 @@
 from enum import Enum


-class DataSourceType(str, Enum):
+class SearchIndexerDataSourceType(str, Enum):
     """Defines the type of a datasource.
     """

-    azure_sql = "azuresql"
-    cosmos_db = "cosmosdb"
-    azure_blob = "azureblob"
-    azure_table = "azuretable"
-    my_sql = "mysql"
+    azure_sql = "azuresql"  #: Indicates an Azure SQL datasource.
+    cosmos_db = "cosmosdb"  #: Indicates a CosmosDB datasource.
+    azure_blob = "azureblob"  #: Indicates an Azure Blob datasource.
+    azure_table = "azuretable"  #: Indicates an Azure Table datasource.
+    my_sql = "mysql"  #: Indicates a MySQL datasource.


 class IndexerExecutionStatus(str, Enum):
     """Represents the status of an individual indexer execution.
     """

-    transient_failure = "transientFailure"
-    success = "success"
-    in_progress = "inProgress"
-    reset = "reset"
+    transient_failure = "transientFailure"  #: An indexer invocation has failed, but the failure may be transient. Indexer invocations will continue per schedule.
+    success = "success"  #: Indexer execution completed successfully.
+    in_progress = "inProgress"  #: Indexer execution is in progress.
+    reset = "reset"  #: Indexer has been reset.


-class DataType(str, Enum):
+class SearchFieldDataType(str, Enum):
     """Defines the data type of a field in a search index.
     """

-    edm_string = "Edm.String"
-    edm_int32 = "Edm.Int32"
-    edm_int64 = "Edm.Int64"
-    edm_double = "Edm.Double"
-    edm_boolean = "Edm.Boolean"
-    edm_date_time_offset = "Edm.DateTimeOffset"
-    edm_geography_point = "Edm.GeographyPoint"
-    edm_complex_type = "Edm.ComplexType"
+    string = "Edm.String"  #: Indicates that a field contains a string.
+    int32 = "Edm.Int32"  #: Indicates that a field contains a 32-bit signed integer.
+    int64 = "Edm.Int64"  #: Indicates that a field contains a 64-bit signed integer.
+    double = "Edm.Double"  #: Indicates that a field contains an IEEE double-precision floating point number.
+    boolean = "Edm.Boolean"  #: Indicates that a field contains a Boolean value (true or false).
+    date_time_offset = "Edm.DateTimeOffset"  #: Indicates that a field contains a date/time value, including timezone information.
+    geography_point = "Edm.GeographyPoint"  #: Indicates that a field contains a geo-location in terms of longitude and latitude.
+    complex = "Edm.ComplexType"  #: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types.


-class AnalyzerName(str, Enum):
+class LexicalAnalyzerName(str, Enum):
     """Defines the names of all text analyzers supported by Azure Cognitive Search.
     """

@@ -140,21 +140,21 @@ class ScoringFunctionInterpolation(str, Enum):
     """Defines the function used to interpolate score boosting across a range of documents.
     """

-    linear = "linear"
-    constant = "constant"
-    quadratic = "quadratic"
-    logarithmic = "logarithmic"
+    linear = "linear"  #: Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions.
+    constant = "constant"  #: Boosts scores by a constant factor.
+    quadratic = "quadratic"  #: Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions.
+    logarithmic = "logarithmic"  #: Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions.


 class ScoringFunctionAggregation(str, Enum):
     """Defines the aggregation function used to combine the results of all the scoring functions
     in a scoring profile.
     """

-    sum = "sum"
-    average = "average"
-    minimum = "minimum"
-    maximum = "maximum"
-    first_matching = "firstMatching"
+    sum = "sum"  #: Boosts scores by the sum of all scoring function results.
+    average = "average"  #: Boosts scores by the average of all scoring function results.
+    minimum = "minimum"  #: Boosts scores by the minimum of all scoring function results.
+    maximum = "maximum"  #: Boosts scores by the maximum of all scoring function results.
+    first_matching = "firstMatching"  #: Boosts scores using the first applicable scoring function in the scoring profile.


 class TokenFilterName(str, Enum):
     """Defines the names of all token filters supported by Azure Cognitive Search.
@@ -163,7 +163,7 @@ class TokenFilterName(str, Enum):
     arabic_normalization = "arabic_normalization"  #: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html.
     apostrophe = "apostrophe"  #: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html.
     ascii_folding = "asciifolding"  #: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html.
-    cjk_bigram = "cjk_bigram"  #: Forms bigrams of CJK terms that are generated from StandardTokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html.
+    cjk_bigram = "cjk_bigram"  #: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html.
     cjk_width = "cjk_width"  #: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html.
     classic = "classic"  #: Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html.
     common_gram = "common_grams"  #: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html.
@@ -199,53 +199,53 @@ class TokenCharacterKind(str, Enum):
     """Represents classes of characters on which a token filter can operate.
""" - letter = "letter" - digit = "digit" - whitespace = "whitespace" - punctuation = "punctuation" - symbol = "symbol" + letter = "letter" #: Keeps letters in tokens. + digit = "digit" #: Keeps digits in tokens. + whitespace = "whitespace" #: Keeps whitespace in tokens. + punctuation = "punctuation" #: Keeps punctuation in tokens. + symbol = "symbol" #: Keeps symbols in tokens. class CjkBigramTokenFilterScripts(str, Enum): """Scripts that can be ignored by CjkBigramTokenFilter. """ - han = "han" - hiragana = "hiragana" - katakana = "katakana" - hangul = "hangul" + han = "han" #: Ignore Han script when forming bigrams of CJK terms. + hiragana = "hiragana" #: Ignore Hiragana script when forming bigrams of CJK terms. + katakana = "katakana" #: Ignore Katakana script when forming bigrams of CJK terms. + hangul = "hangul" #: Ignore Hangul script when forming bigrams of CJK terms. class VisualFeature(str, Enum): """The strings indicating what visual feature types to return. """ - adult = "adult" - brands = "brands" - categories = "categories" - description = "description" - faces = "faces" - objects = "objects" - tags = "tags" + adult = "adult" #: Visual features recognized as adult persons. + brands = "brands" #: Visual features recognized as commercial brands. + categories = "categories" #: Categories. + description = "description" #: Description. + faces = "faces" #: Visual features recognized as people faces. + objects = "objects" #: Visual features recognized as objects. + tags = "tags" #: Tags. class ImageDetail(str, Enum): """A string indicating which domain-specific details to return. """ - celebrities = "celebrities" - landmarks = "landmarks" + celebrities = "celebrities" #: Details recognized as celebrities. + landmarks = "landmarks" #: Details recognized as landmarks. class EntityCategory(str, Enum): """A string indicating what entity categories to return. """ - location = "location" - organization = "organization" - person = "person" - quantity = "quantity" - datetime = "datetime" - url = "url" - email = "email" + location = "location" #: Entities describing a physical location. + organization = "organization" #: Entities describing an organization. + person = "person" #: Entities describing a person. + quantity = "quantity" #: Entities describing a quantity. + datetime = "datetime" #: Entities describing a date and time. + url = "url" #: Entities describing a URL. + email = "email" #: Entities describing an email address. -class TokenizerName(str, Enum): +class LexicalTokenizerName(str, Enum): """Defines the names of all tokenizers supported by Azure Cognitive Search. """ @@ -268,14 +268,14 @@ class RegexFlags(str, Enum): analyzer and pattern tokenizer. """ - canon_eq = "CANON_EQ" - case_insensitive = "CASE_INSENSITIVE" - comments = "COMMENTS" - dotall = "DOTALL" - literal = "LITERAL" - multiline = "MULTILINE" - unicode_case = "UNICODE_CASE" - unix_lines = "UNIX_LINES" + canon_eq = "CANON_EQ" #: Enables canonical equivalence. + case_insensitive = "CASE_INSENSITIVE" #: Enables case-insensitive matching. + comments = "COMMENTS" #: Permits whitespace and comments in the pattern. + dot_all = "DOTALL" #: Enables dotall mode. + literal = "LITERAL" #: Enables literal parsing of the pattern. + multiline = "MULTILINE" #: Enables multiline mode. + unicode_case = "UNICODE_CASE" #: Enables Unicode-aware case folding. + unix_lines = "UNIX_LINES" #: Enables Unix lines mode. 
class KeyPhraseExtractionSkillLanguage(str, Enum): """The language codes supported for input text by KeyPhraseExtractionSkill. @@ -473,262 +473,262 @@ class IndexerStatus(str, Enum): """Represents the overall indexer status. """ - unknown = "unknown" - error = "error" - running = "running" + unknown = "unknown" #: Indicates that the indexer is in an unknown state. + error = "error" #: Indicates that the indexer experienced an error that cannot be corrected without human intervention. + running = "running" #: Indicates that the indexer is running normally. class MicrosoftTokenizerLanguage(str, Enum): """Lists the languages supported by the Microsoft language tokenizer. """ - bangla = "bangla" - bulgarian = "bulgarian" - catalan = "catalan" - chinese_simplified = "chineseSimplified" - chinese_traditional = "chineseTraditional" - croatian = "croatian" - czech = "czech" - danish = "danish" - dutch = "dutch" - english = "english" - french = "french" - german = "german" - greek = "greek" - gujarati = "gujarati" - hindi = "hindi" - icelandic = "icelandic" - indonesian = "indonesian" - italian = "italian" - japanese = "japanese" - kannada = "kannada" - korean = "korean" - malay = "malay" - malayalam = "malayalam" - marathi = "marathi" - norwegian_bokmaal = "norwegianBokmaal" - polish = "polish" - portuguese = "portuguese" - portuguese_brazilian = "portugueseBrazilian" - punjabi = "punjabi" - romanian = "romanian" - russian = "russian" - serbian_cyrillic = "serbianCyrillic" - serbian_latin = "serbianLatin" - slovenian = "slovenian" - spanish = "spanish" - swedish = "swedish" - tamil = "tamil" - telugu = "telugu" - thai = "thai" - ukrainian = "ukrainian" - urdu = "urdu" - vietnamese = "vietnamese" + bangla = "bangla" #: Selects the Microsoft tokenizer for Bangla. + bulgarian = "bulgarian" #: Selects the Microsoft tokenizer for Bulgarian. + catalan = "catalan" #: Selects the Microsoft tokenizer for Catalan. + chinese_simplified = "chineseSimplified" #: Selects the Microsoft tokenizer for Chinese (Simplified). + chinese_traditional = "chineseTraditional" #: Selects the Microsoft tokenizer for Chinese (Traditional). + croatian = "croatian" #: Selects the Microsoft tokenizer for Croatian. + czech = "czech" #: Selects the Microsoft tokenizer for Czech. + danish = "danish" #: Selects the Microsoft tokenizer for Danish. + dutch = "dutch" #: Selects the Microsoft tokenizer for Dutch. + english = "english" #: Selects the Microsoft tokenizer for English. + french = "french" #: Selects the Microsoft tokenizer for French. + german = "german" #: Selects the Microsoft tokenizer for German. + greek = "greek" #: Selects the Microsoft tokenizer for Greek. + gujarati = "gujarati" #: Selects the Microsoft tokenizer for Gujarati. + hindi = "hindi" #: Selects the Microsoft tokenizer for Hindi. + icelandic = "icelandic" #: Selects the Microsoft tokenizer for Icelandic. + indonesian = "indonesian" #: Selects the Microsoft tokenizer for Indonesian. + italian = "italian" #: Selects the Microsoft tokenizer for Italian. + japanese = "japanese" #: Selects the Microsoft tokenizer for Japanese. + kannada = "kannada" #: Selects the Microsoft tokenizer for Kannada. + korean = "korean" #: Selects the Microsoft tokenizer for Korean. + malay = "malay" #: Selects the Microsoft tokenizer for Malay. + malayalam = "malayalam" #: Selects the Microsoft tokenizer for Malayalam. + marathi = "marathi" #: Selects the Microsoft tokenizer for Marathi. 
+ norwegian_bokmaal = "norwegianBokmaal" #: Selects the Microsoft tokenizer for Norwegian (Bokmål). + polish = "polish" #: Selects the Microsoft tokenizer for Polish. + portuguese = "portuguese" #: Selects the Microsoft tokenizer for Portuguese. + portuguese_brazilian = "portugueseBrazilian" #: Selects the Microsoft tokenizer for Portuguese (Brazil). + punjabi = "punjabi" #: Selects the Microsoft tokenizer for Punjabi. + romanian = "romanian" #: Selects the Microsoft tokenizer for Romanian. + russian = "russian" #: Selects the Microsoft tokenizer for Russian. + serbian_cyrillic = "serbianCyrillic" #: Selects the Microsoft tokenizer for Serbian (Cyrillic). + serbian_latin = "serbianLatin" #: Selects the Microsoft tokenizer for Serbian (Latin). + slovenian = "slovenian" #: Selects the Microsoft tokenizer for Slovenian. + spanish = "spanish" #: Selects the Microsoft tokenizer for Spanish. + swedish = "swedish" #: Selects the Microsoft tokenizer for Swedish. + tamil = "tamil" #: Selects the Microsoft tokenizer for Tamil. + telugu = "telugu" #: Selects the Microsoft tokenizer for Telugu. + thai = "thai" #: Selects the Microsoft tokenizer for Thai. + ukrainian = "ukrainian" #: Selects the Microsoft tokenizer for Ukrainian. + urdu = "urdu" #: Selects the Microsoft tokenizer for Urdu. + vietnamese = "vietnamese" #: Selects the Microsoft tokenizer for Vietnamese. class MicrosoftStemmingTokenizerLanguage(str, Enum): """Lists the languages supported by the Microsoft language stemming tokenizer. """ - arabic = "arabic" - bangla = "bangla" - bulgarian = "bulgarian" - catalan = "catalan" - croatian = "croatian" - czech = "czech" - danish = "danish" - dutch = "dutch" - english = "english" - estonian = "estonian" - finnish = "finnish" - french = "french" - german = "german" - greek = "greek" - gujarati = "gujarati" - hebrew = "hebrew" - hindi = "hindi" - hungarian = "hungarian" - icelandic = "icelandic" - indonesian = "indonesian" - italian = "italian" - kannada = "kannada" - latvian = "latvian" - lithuanian = "lithuanian" - malay = "malay" - malayalam = "malayalam" - marathi = "marathi" - norwegian_bokmaal = "norwegianBokmaal" - polish = "polish" - portuguese = "portuguese" - portuguese_brazilian = "portugueseBrazilian" - punjabi = "punjabi" - romanian = "romanian" - russian = "russian" - serbian_cyrillic = "serbianCyrillic" - serbian_latin = "serbianLatin" - slovak = "slovak" - slovenian = "slovenian" - spanish = "spanish" - swedish = "swedish" - tamil = "tamil" - telugu = "telugu" - turkish = "turkish" - ukrainian = "ukrainian" - urdu = "urdu" + arabic = "arabic" #: Selects the Microsoft stemming tokenizer for Arabic. + bangla = "bangla" #: Selects the Microsoft stemming tokenizer for Bangla. + bulgarian = "bulgarian" #: Selects the Microsoft stemming tokenizer for Bulgarian. + catalan = "catalan" #: Selects the Microsoft stemming tokenizer for Catalan. + croatian = "croatian" #: Selects the Microsoft stemming tokenizer for Croatian. + czech = "czech" #: Selects the Microsoft stemming tokenizer for Czech. + danish = "danish" #: Selects the Microsoft stemming tokenizer for Danish. + dutch = "dutch" #: Selects the Microsoft stemming tokenizer for Dutch. + english = "english" #: Selects the Microsoft stemming tokenizer for English. + estonian = "estonian" #: Selects the Microsoft stemming tokenizer for Estonian. + finnish = "finnish" #: Selects the Microsoft stemming tokenizer for Finnish. + french = "french" #: Selects the Microsoft stemming tokenizer for French. 
+ german = "german" #: Selects the Microsoft stemming tokenizer for German. + greek = "greek" #: Selects the Microsoft stemming tokenizer for Greek. + gujarati = "gujarati" #: Selects the Microsoft stemming tokenizer for Gujarati. + hebrew = "hebrew" #: Selects the Microsoft stemming tokenizer for Hebrew. + hindi = "hindi" #: Selects the Microsoft stemming tokenizer for Hindi. + hungarian = "hungarian" #: Selects the Microsoft stemming tokenizer for Hungarian. + icelandic = "icelandic" #: Selects the Microsoft stemming tokenizer for Icelandic. + indonesian = "indonesian" #: Selects the Microsoft stemming tokenizer for Indonesian. + italian = "italian" #: Selects the Microsoft stemming tokenizer for Italian. + kannada = "kannada" #: Selects the Microsoft stemming tokenizer for Kannada. + latvian = "latvian" #: Selects the Microsoft stemming tokenizer for Latvian. + lithuanian = "lithuanian" #: Selects the Microsoft stemming tokenizer for Lithuanian. + malay = "malay" #: Selects the Microsoft stemming tokenizer for Malay. + malayalam = "malayalam" #: Selects the Microsoft stemming tokenizer for Malayalam. + marathi = "marathi" #: Selects the Microsoft stemming tokenizer for Marathi. + norwegian_bokmaal = "norwegianBokmaal" #: Selects the Microsoft stemming tokenizer for Norwegian (Bokmål). + polish = "polish" #: Selects the Microsoft stemming tokenizer for Polish. + portuguese = "portuguese" #: Selects the Microsoft stemming tokenizer for Portuguese. + portuguese_brazilian = "portugueseBrazilian" #: Selects the Microsoft stemming tokenizer for Portuguese (Brazil). + punjabi = "punjabi" #: Selects the Microsoft stemming tokenizer for Punjabi. + romanian = "romanian" #: Selects the Microsoft stemming tokenizer for Romanian. + russian = "russian" #: Selects the Microsoft stemming tokenizer for Russian. + serbian_cyrillic = "serbianCyrillic" #: Selects the Microsoft stemming tokenizer for Serbian (Cyrillic). + serbian_latin = "serbianLatin" #: Selects the Microsoft stemming tokenizer for Serbian (Latin). + slovak = "slovak" #: Selects the Microsoft stemming tokenizer for Slovak. + slovenian = "slovenian" #: Selects the Microsoft stemming tokenizer for Slovenian. + spanish = "spanish" #: Selects the Microsoft stemming tokenizer for Spanish. + swedish = "swedish" #: Selects the Microsoft stemming tokenizer for Swedish. + tamil = "tamil" #: Selects the Microsoft stemming tokenizer for Tamil. + telugu = "telugu" #: Selects the Microsoft stemming tokenizer for Telugu. + turkish = "turkish" #: Selects the Microsoft stemming tokenizer for Turkish. + ukrainian = "ukrainian" #: Selects the Microsoft stemming tokenizer for Ukrainian. + urdu = "urdu" #: Selects the Microsoft stemming tokenizer for Urdu. class EdgeNGramTokenFilterSide(str, Enum): """Specifies which side of the input an n-gram should be generated from. """ - front = "front" - back = "back" + front = "front" #: Specifies that the n-gram should be generated from the front of the input. + back = "back" #: Specifies that the n-gram should be generated from the back of the input. class PhoneticEncoder(str, Enum): """Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. 
""" - metaphone = "metaphone" - double_metaphone = "doubleMetaphone" - soundex = "soundex" - refined_soundex = "refinedSoundex" - caverphone1 = "caverphone1" - caverphone2 = "caverphone2" - cologne = "cologne" - nysiis = "nysiis" - koelner_phonetik = "koelnerPhonetik" - haase_phonetik = "haasePhonetik" - beider_morse = "beiderMorse" + metaphone = "metaphone" #: Encodes a token into a Metaphone value. + double_metaphone = "doubleMetaphone" #: Encodes a token into a double metaphone value. + soundex = "soundex" #: Encodes a token into a Soundex value. + refined_soundex = "refinedSoundex" #: Encodes a token into a Refined Soundex value. + caverphone1 = "caverphone1" #: Encodes a token into a Caverphone 1.0 value. + caverphone2 = "caverphone2" #: Encodes a token into a Caverphone 2.0 value. + cologne = "cologne" #: Encodes a token into a Cologne Phonetic value. + nysiis = "nysiis" #: Encodes a token into a NYSIIS value. + koelner_phonetik = "koelnerPhonetik" #: Encodes a token using the Kölner Phonetik algorithm. + haase_phonetik = "haasePhonetik" #: Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. + beider_morse = "beiderMorse" #: Encodes a token into a Beider-Morse value. class SnowballTokenFilterLanguage(str, Enum): """The language to use for a Snowball token filter. """ - armenian = "armenian" - basque = "basque" - catalan = "catalan" - danish = "danish" - dutch = "dutch" - english = "english" - finnish = "finnish" - french = "french" - german = "german" - german2 = "german2" - hungarian = "hungarian" - italian = "italian" - kp = "kp" - lovins = "lovins" - norwegian = "norwegian" - porter = "porter" - portuguese = "portuguese" - romanian = "romanian" - russian = "russian" - spanish = "spanish" - swedish = "swedish" - turkish = "turkish" + armenian = "armenian" #: Selects the Lucene Snowball stemming tokenizer for Armenian. + basque = "basque" #: Selects the Lucene Snowball stemming tokenizer for Basque. + catalan = "catalan" #: Selects the Lucene Snowball stemming tokenizer for Catalan. + danish = "danish" #: Selects the Lucene Snowball stemming tokenizer for Danish. + dutch = "dutch" #: Selects the Lucene Snowball stemming tokenizer for Dutch. + english = "english" #: Selects the Lucene Snowball stemming tokenizer for English. + finnish = "finnish" #: Selects the Lucene Snowball stemming tokenizer for Finnish. + french = "french" #: Selects the Lucene Snowball stemming tokenizer for French. + german = "german" #: Selects the Lucene Snowball stemming tokenizer for German. + german2 = "german2" #: Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm. + hungarian = "hungarian" #: Selects the Lucene Snowball stemming tokenizer for Hungarian. + italian = "italian" #: Selects the Lucene Snowball stemming tokenizer for Italian. + kp = "kp" #: Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. + lovins = "lovins" #: Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming algorithm. + norwegian = "norwegian" #: Selects the Lucene Snowball stemming tokenizer for Norwegian. + porter = "porter" #: Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming algorithm. + portuguese = "portuguese" #: Selects the Lucene Snowball stemming tokenizer for Portuguese. + romanian = "romanian" #: Selects the Lucene Snowball stemming tokenizer for Romanian. 
+ russian = "russian" #: Selects the Lucene Snowball stemming tokenizer for Russian. + spanish = "spanish" #: Selects the Lucene Snowball stemming tokenizer for Spanish. + swedish = "swedish" #: Selects the Lucene Snowball stemming tokenizer for Swedish. + turkish = "turkish" #: Selects the Lucene Snowball stemming tokenizer for Turkish. class StemmerTokenFilterLanguage(str, Enum): """The language to use for a stemmer token filter. """ - arabic = "arabic" - armenian = "armenian" - basque = "basque" - brazilian = "brazilian" - bulgarian = "bulgarian" - catalan = "catalan" - czech = "czech" - danish = "danish" - dutch = "dutch" - dutch_kp = "dutchKp" - english = "english" - light_english = "lightEnglish" - minimal_english = "minimalEnglish" - possessive_english = "possessiveEnglish" - porter2 = "porter2" - lovins = "lovins" - finnish = "finnish" - light_finnish = "lightFinnish" - french = "french" - light_french = "lightFrench" - minimal_french = "minimalFrench" - galician = "galician" - minimal_galician = "minimalGalician" - german = "german" - german2 = "german2" - light_german = "lightGerman" - minimal_german = "minimalGerman" - greek = "greek" - hindi = "hindi" - hungarian = "hungarian" - light_hungarian = "lightHungarian" - indonesian = "indonesian" - irish = "irish" - italian = "italian" - light_italian = "lightItalian" - sorani = "sorani" - latvian = "latvian" - norwegian = "norwegian" - light_norwegian = "lightNorwegian" - minimal_norwegian = "minimalNorwegian" - light_nynorsk = "lightNynorsk" - minimal_nynorsk = "minimalNynorsk" - portuguese = "portuguese" - light_portuguese = "lightPortuguese" - minimal_portuguese = "minimalPortuguese" - portuguese_rslp = "portugueseRslp" - romanian = "romanian" - russian = "russian" - light_russian = "lightRussian" - spanish = "spanish" - light_spanish = "lightSpanish" - swedish = "swedish" - light_swedish = "lightSwedish" - turkish = "turkish" + arabic = "arabic" #: Selects the Lucene stemming tokenizer for Arabic. + armenian = "armenian" #: Selects the Lucene stemming tokenizer for Armenian. + basque = "basque" #: Selects the Lucene stemming tokenizer for Basque. + brazilian = "brazilian" #: Selects the Lucene stemming tokenizer for Portuguese (Brazil). + bulgarian = "bulgarian" #: Selects the Lucene stemming tokenizer for Bulgarian. + catalan = "catalan" #: Selects the Lucene stemming tokenizer for Catalan. + czech = "czech" #: Selects the Lucene stemming tokenizer for Czech. + danish = "danish" #: Selects the Lucene stemming tokenizer for Danish. + dutch = "dutch" #: Selects the Lucene stemming tokenizer for Dutch. + dutch_kp = "dutchKp" #: Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. + english = "english" #: Selects the Lucene stemming tokenizer for English. + light_english = "lightEnglish" #: Selects the Lucene stemming tokenizer for English that does light stemming. + minimal_english = "minimalEnglish" #: Selects the Lucene stemming tokenizer for English that does minimal stemming. + possessive_english = "possessiveEnglish" #: Selects the Lucene stemming tokenizer for English that removes trailing possessives from words. + porter2 = "porter2" #: Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm. + lovins = "lovins" #: Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm. + finnish = "finnish" #: Selects the Lucene stemming tokenizer for Finnish. 
+ light_finnish = "lightFinnish" #: Selects the Lucene stemming tokenizer for Finnish that does light stemming. + french = "french" #: Selects the Lucene stemming tokenizer for French. + light_french = "lightFrench" #: Selects the Lucene stemming tokenizer for French that does light stemming. + minimal_french = "minimalFrench" #: Selects the Lucene stemming tokenizer for French that does minimal stemming. + galician = "galician" #: Selects the Lucene stemming tokenizer for Galician. + minimal_galician = "minimalGalician" #: Selects the Lucene stemming tokenizer for Galician that does minimal stemming. + german = "german" #: Selects the Lucene stemming tokenizer for German. + german2 = "german2" #: Selects the Lucene stemming tokenizer that uses the German variant algorithm. + light_german = "lightGerman" #: Selects the Lucene stemming tokenizer for German that does light stemming. + minimal_german = "minimalGerman" #: Selects the Lucene stemming tokenizer for German that does minimal stemming. + greek = "greek" #: Selects the Lucene stemming tokenizer for Greek. + hindi = "hindi" #: Selects the Lucene stemming tokenizer for Hindi. + hungarian = "hungarian" #: Selects the Lucene stemming tokenizer for Hungarian. + light_hungarian = "lightHungarian" #: Selects the Lucene stemming tokenizer for Hungarian that does light stemming. + indonesian = "indonesian" #: Selects the Lucene stemming tokenizer for Indonesian. + irish = "irish" #: Selects the Lucene stemming tokenizer for Irish. + italian = "italian" #: Selects the Lucene stemming tokenizer for Italian. + light_italian = "lightItalian" #: Selects the Lucene stemming tokenizer for Italian that does light stemming. + sorani = "sorani" #: Selects the Lucene stemming tokenizer for Sorani. + latvian = "latvian" #: Selects the Lucene stemming tokenizer for Latvian. + norwegian = "norwegian" #: Selects the Lucene stemming tokenizer for Norwegian (Bokmål). + light_norwegian = "lightNorwegian" #: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming. + minimal_norwegian = "minimalNorwegian" #: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming. + light_nynorsk = "lightNynorsk" #: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming. + minimal_nynorsk = "minimalNynorsk" #: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming. + portuguese = "portuguese" #: Selects the Lucene stemming tokenizer for Portuguese. + light_portuguese = "lightPortuguese" #: Selects the Lucene stemming tokenizer for Portuguese that does light stemming. + minimal_portuguese = "minimalPortuguese" #: Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming. + portuguese_rslp = "portugueseRslp" #: Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm. + romanian = "romanian" #: Selects the Lucene stemming tokenizer for Romanian. + russian = "russian" #: Selects the Lucene stemming tokenizer for Russian. + light_russian = "lightRussian" #: Selects the Lucene stemming tokenizer for Russian that does light stemming. + spanish = "spanish" #: Selects the Lucene stemming tokenizer for Spanish. + light_spanish = "lightSpanish" #: Selects the Lucene stemming tokenizer for Spanish that does light stemming. + swedish = "swedish" #: Selects the Lucene stemming tokenizer for Swedish. + light_swedish = "lightSwedish" #: Selects the Lucene stemming tokenizer for Swedish that does light stemming. 
+ turkish = "turkish" #: Selects the Lucene stemming tokenizer for Turkish. class StopwordsList(str, Enum): """Identifies a predefined list of language-specific stopwords. """ - arabic = "arabic" - armenian = "armenian" - basque = "basque" - brazilian = "brazilian" - bulgarian = "bulgarian" - catalan = "catalan" - czech = "czech" - danish = "danish" - dutch = "dutch" - english = "english" - finnish = "finnish" - french = "french" - galician = "galician" - german = "german" - greek = "greek" - hindi = "hindi" - hungarian = "hungarian" - indonesian = "indonesian" - irish = "irish" - italian = "italian" - latvian = "latvian" - norwegian = "norwegian" - persian = "persian" - portuguese = "portuguese" - romanian = "romanian" - russian = "russian" - sorani = "sorani" - spanish = "spanish" - swedish = "swedish" - thai = "thai" - turkish = "turkish" + arabic = "arabic" #: Selects the stopword list for Arabic. + armenian = "armenian" #: Selects the stopword list for Armenian. + basque = "basque" #: Selects the stopword list for Basque. + brazilian = "brazilian" #: Selects the stopword list for Portuguese (Brazil). + bulgarian = "bulgarian" #: Selects the stopword list for Bulgarian. + catalan = "catalan" #: Selects the stopword list for Catalan. + czech = "czech" #: Selects the stopword list for Czech. + danish = "danish" #: Selects the stopword list for Danish. + dutch = "dutch" #: Selects the stopword list for Dutch. + english = "english" #: Selects the stopword list for English. + finnish = "finnish" #: Selects the stopword list for Finnish. + french = "french" #: Selects the stopword list for French. + galician = "galician" #: Selects the stopword list for Galician. + german = "german" #: Selects the stopword list for German. + greek = "greek" #: Selects the stopword list for Greek. + hindi = "hindi" #: Selects the stopword list for Hindi. + hungarian = "hungarian" #: Selects the stopword list for Hungarian. + indonesian = "indonesian" #: Selects the stopword list for Indonesian. + irish = "irish" #: Selects the stopword list for Irish. + italian = "italian" #: Selects the stopword list for Italian. + latvian = "latvian" #: Selects the stopword list for Latvian. + norwegian = "norwegian" #: Selects the stopword list for Norwegian. + persian = "persian" #: Selects the stopword list for Persian. + portuguese = "portuguese" #: Selects the stopword list for Portuguese. + romanian = "romanian" #: Selects the stopword list for Romanian. + russian = "russian" #: Selects the stopword list for Russian. + sorani = "sorani" #: Selects the stopword list for Sorani. + spanish = "spanish" #: Selects the stopword list for Spanish. + swedish = "swedish" #: Selects the stopword list for Swedish. + thai = "thai" #: Selects the stopword list for Thai. + turkish = "turkish" #: Selects the stopword list for Turkish. class TextExtractionAlgorithm(str, Enum): """A value indicating which algorithm to use. Default is printed. """ - printed = "printed" - handwritten = "handwritten" + printed = "printed" #: An algorithm suitable for printed text. + handwritten = "handwritten" #: An algorithm suitable for handwritten text. class TextSplitMode(str, Enum): """A value indicating which split mode to perform. """ - pages = "pages" - sentences = "sentences" + pages = "pages" #: Split the text into individual pages. + sentences = "sentences" #: Split the text into individual sentences. 
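The trailing "#:" markers added throughout this file are Sphinx attribute documentation comments: they surface the per-value descriptions in the rendered API reference but have no runtime effect, since each member remains a plain string on the wire. A quick sketch, using the same internal import path as above:

# The "#:" doc comments do not change serialization; str-backed members
# are emitted as their literal wire values in REST request bodies.
from azure.search.documents._service._generated.models import (
    PhoneticEncoder,
    StopwordsList,
    TextSplitMode,
)

assert TextSplitMode.pages.value == "pages"
assert PhoneticEncoder.beider_morse == "beiderMorse"
assert StopwordsList.brazilian == "brazilian"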
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py index 7ba6982701cc..564a0da54a45 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py @@ -40,36 +40,36 @@ def __init__(self, client, config, serializer, deserializer): def create_or_update( self, data_source_name, # type: str - data_source, # type: "models.DataSource" + data_source, # type: "models.SearchIndexerDataSource" + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): - # type: (...) -> "models.DataSource" + # type: (...) -> "models.SearchIndexerDataSource" """Creates a new datasource or updates a datasource if it already exists. :param data_source_name: The name of the datasource to create or update. :type data_source_name: str :param data_source: The definition of the datasource to create or update. - :type data_source: ~search_service_client.models.DataSource + :type data_source: ~search_service_client.models.SearchIndexerDataSource + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
-        :type access_condition: ~search_service_client.models.AccessCondition
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: DataSource or the result of cls(response)
-        :rtype: ~search_service_client.models.DataSource or ~search_service_client.models.DataSource
+        :return: SearchIndexerDataSource or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexerDataSource
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSource"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerDataSource"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         prefer = "return=representation"
@@ -91,17 +91,17 @@ def create_or_update(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
         header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
         header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(data_source, 'DataSource')
+        body_content = self._serialize.body(data_source, 'SearchIndexerDataSource')
         body_content_kwargs['content'] = body_content
         request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
@@ -115,10 +115,10 @@ def create_or_update(
         deserialized = None
         if response.status_code == 200:
-            deserialized = self._deserialize('DataSource', pipeline_response)
+            deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)

         if response.status_code == 201:
-            deserialized = self._deserialize('DataSource', pipeline_response)
+            deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})
@@ -129,8 +129,9 @@ def create_or_update(
     def delete(
         self,
         data_source_name,  # type: str
+        if_match=None,  # type: Optional[str]
+        if_none_match=None,  # type: Optional[str]
         request_options=None,  # type: Optional["models.RequestOptions"]
-        access_condition=None,  # type: Optional["models.AccessCondition"]
         **kwargs  # type: Any
     ):
         # type: (...) -> None
@@ -138,10 +139,14 @@ def delete(

         :param data_source_name: The name of the datasource to delete.
         :type data_source_name: str
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+ :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -151,11 +156,6 @@ def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -176,10 +176,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -202,7 +202,7 @@ def get( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.DataSource" + # type: (...) -> "models.SearchIndexerDataSource" """Retrieves a datasource definition. :param data_source_name: The name of the datasource to retrieve. @@ -210,11 +210,11 @@ def get( :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: DataSource or the result of cls(response) - :rtype: ~search_service_client.models.DataSource + :return: SearchIndexerDataSource or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -250,7 +250,7 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('DataSource', pipeline_response) + deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -325,23 +325,23 @@ def list( def create( self, - data_source, # type: "models.DataSource" + data_source, # type: "models.SearchIndexerDataSource" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.DataSource" + # type: (...) -> "models.SearchIndexerDataSource" """Creates a new datasource. :param data_source: The definition of the datasource to create. - :type data_source: ~search_service_client.models.DataSource + :type data_source: ~search_service_client.models.SearchIndexerDataSource :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: DataSource or the result of cls(response) - :rtype: ~search_service_client.models.DataSource + :return: SearchIndexerDataSource or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -369,7 +369,7 @@ def create( # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(data_source, 'DataSource') + body_content = self._serialize.body(data_source, 'SearchIndexerDataSource') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -381,7 +381,7 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('DataSource', pipeline_response) + deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py index 38ed69e0fb8d..5d4c0ff5609c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py
@@ -156,36 +156,36 @@ def run(
     def create_or_update(
         self,
         indexer_name,  # type: str
-        indexer,  # type: "models.Indexer"
+        indexer,  # type: "models.SearchIndexer"
+        if_match=None,  # type: Optional[str]
+        if_none_match=None,  # type: Optional[str]
         request_options=None,  # type: Optional["models.RequestOptions"]
-        access_condition=None,  # type: Optional["models.AccessCondition"]
         **kwargs  # type: Any
     ):
-        # type: (...) -> "models.Indexer"
+        # type: (...) -> "models.SearchIndexer"
         """Creates a new indexer or updates an indexer if it already exists.

         :param indexer_name: The name of the indexer to create or update.
         :type indexer_name: str
         :param indexer: The definition of the indexer to create or update.
-        :type indexer: ~search_service_client.models.Indexer
+        :type indexer: ~search_service_client.models.SearchIndexer
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
         :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: Indexer or the result of cls(response)
-        :rtype: ~search_service_client.models.Indexer or ~search_service_client.models.Indexer
+        :return: SearchIndexer or the result of cls(response)
+        :rtype: ~search_service_client.models.SearchIndexer
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.Indexer"]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexer"]
         error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         prefer = "return=representation"
@@ -207,17 +207,17 @@ def create_or_update(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
         header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
         header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = 
self._serialize.body(indexer, 'Indexer') + body_content = self._serialize.body(indexer, 'SearchIndexer') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -231,10 +231,10 @@ def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -245,8 +245,9 @@ def create_or_update( def delete( self, indexer_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None @@ -254,10 +255,14 @@ def delete( :param indexer_name: The name of the indexer to delete. :type indexer_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -267,11 +272,6 @@ def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -292,10 +292,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -318,7 +318,7 @@ def get( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Indexer" + # type: (...) -> "models.SearchIndexer" """Retrieves an indexer definition. :param indexer_name: The name of the indexer to retrieve. @@ -326,11 +326,11 @@ def get( :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Indexer or the result of cls(response) - :rtype: ~search_service_client.models.Indexer + :return: SearchIndexer or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -366,7 +366,7 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -441,23 +441,23 @@ def list( def create( self, - indexer, # type: "models.Indexer" + indexer, # type: "models.SearchIndexer" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Indexer" + # type: (...) -> "models.SearchIndexer" """Creates a new indexer. :param indexer: The definition of the indexer to create. - :type indexer: ~search_service_client.models.Indexer + :type indexer: ~search_service_client.models.SearchIndexer :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Indexer or the result of cls(response) - :rtype: ~search_service_client.models.Indexer + :return: SearchIndexer or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -485,7 +485,7 @@ def create( # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(indexer, 'Indexer') + body_content = self._serialize.body(indexer, 'SearchIndexer') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -497,7 +497,7 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -511,7 +511,7 @@ def get_status( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.IndexerExecutionInfo" + # type: (...) -> "models.SearchIndexerStatus" """Returns the current status and execution history of an indexer. :param indexer_name: The name of the indexer for which to retrieve status. @@ -519,11 +519,11 @@ def get_status( :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: IndexerExecutionInfo or the result of cls(response) - :rtype: ~search_service_client.models.IndexerExecutionInfo + :return: SearchIndexerStatus or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerStatus :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.IndexerExecutionInfo"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerStatus"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -559,7 +559,7 @@ def get_status( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('IndexerExecutionInfo', pipeline_response) + deserialized = self._deserialize('SearchIndexerStatus', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py index 8fc85d56b781..bbfbfe51cfeb 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py @@ -7,6 +7,7 @@ import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse @@ -39,23 +40,23 @@ def __init__(self, client, config, serializer, deserializer): def create( self, - index, # type: "models.Index" + index, # type: "models.SearchIndex" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Index" + # type: (...) -> "models.SearchIndex" """Creates a new search index. :param index: The definition of the index to create. - :type index: ~search_service_client.models.Index + :type index: ~search_service_client.models.SearchIndex :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -83,7 +84,7 @@ def create( # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -95,7 +96,7 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -113,8 +114,8 @@ def list( """Lists all indexes available for a search service. :param select: Selects which top-level properties of the index definitions to retrieve. - Specified as a comma-separated list of JSON property names, or '*' for all properties. The - default is all properties. + Specified as a comma-separated list of JSON property names, or '*' for all properties. The + default is all properties. :type select: str :param request_options: Parameter group. 
:type request_options: ~search_service_client.models.RequestOptions @@ -131,83 +132,102 @@ def list( _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _x_ms_client_request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - header_parameters['Accept'] = 'application/json' - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(models.SearchError, response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize('ListIndexesResult', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + else: + url = next_link + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('ListIndexesResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + error = self._deserialize(models.SearchError, response) + map_error(status_code=response.status_code, response=response, error_map=error_map, model=error) + raise 
HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) list.metadata = {'url': '/indexes'} def create_or_update( self, index_name, # type: str - index, # type: "models.Index" + index, # type: "models.SearchIndex" allow_index_downtime=None, # type: Optional[bool] + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): - # type: (...) -> "models.Index" + # type: (...) -> "models.SearchIndex" """Creates a new search index or updates an index if it already exists. :param index_name: The definition of the index to create or update. :type index_name: str :param index: The definition of the index to create or update. - :type index: ~search_service_client.models.Index + :type index: ~search_service_client.models.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. :type allow_index_downtime: bool + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
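The list() rework above adopts the standard azure-core paging contract: instead of returning one deserialized ListIndexesResult, the operation returns an ItemPaged that lazily invokes get_next and extract_data per page. A toy sketch of that contract, with a stub in place of the HTTP call so it runs with only azure-core installed:

    from azure.core.paging import ItemPaged

    def _get_next(continuation_token=None):
        # The generated code builds and sends a request here (prepare_request
        # plus pipeline.run); this stub fakes a single page with no continuation.
        return {"value": ["index-a", "index-b"], "next_link": None}

    def _extract_data(response):
        # Must return (continuation_token, iterable_of_items); a falsy token
        # tells the pager to stop after this page.
        return response["next_link"], iter(response["value"])

    for name in ItemPaged(_get_next, _extract_data):
        print(name)

Callers of the real operation iterate the same way, e.g. for index in operations.list(select="name"), with select forwarded on every page request as the $select query parameter.
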
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index or ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndex or ~search_service_client.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" @@ -231,17 +251,17 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -255,10 +275,10 @@ def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -269,19 +289,24 @@ def create_or_update( def delete( self, index_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None - """Deletes a search index and all the documents it contains. + """Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. :param index_name: The name of the index to delete. 
:type index_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -291,11 +316,6 @@ def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -316,10 +336,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -342,7 +362,7 @@ def get( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Index" + # type: (...) -> "models.SearchIndex" """Retrieves an index definition. :param index_name: The name of the index to retrieve. @@ -350,11 +370,11 @@ def get( :param request_options: Parameter group. 
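The flattened if_match / if_none_match keywords map one-to-one onto the HTTP If-Match and If-None-Match precondition headers, so optimistic concurrency no longer requires assembling an AccessCondition group. A hedged sketch of the calling pattern, assuming operations is an indexes operations instance and that the fetched model exposes its etag as e_tag (an assumption; the attribute name is not shown in this hunk):

    # Read-modify-write: the PUT succeeds only if nobody changed the index
    # since we fetched it; otherwise the service answers 412 Precondition Failed.
    index = operations.get("hotel-index")            # hypothetical index name
    operations.create_or_update("hotel-index", index, if_match=index.e_tag)

    # Create-only semantics: '*' matches any etag, so the write goes through
    # only when the resource does not exist yet.
    operations.create_or_update("brand-new-index", index, if_none_match="*")

The same pair of keywords replaces access_condition on the other create_or_update and delete operations in this patch.
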
:type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -390,7 +410,7 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py index 37ab90d2bb9d..adb17bc79dc4 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py @@ -40,37 +40,37 @@ def __init__(self, client, config, serializer, deserializer): def create_or_update( self, skillset_name, # type: str - skillset, # type: "models.Skillset" + skillset, # type: "models.SearchIndexerSkillset" + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): - # type: (...) -> "models.Skillset" + # type: (...) -> "models.SearchIndexerSkillset" """Creates a new skillset in a search service or updates the skillset if it already exists. :param skillset_name: The name of the skillset to create or update. :type skillset_name: str :param skillset: The skillset containing one or more skills to create or update in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~search_service_client.models.SearchIndexerSkillset + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset or ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerSkillset or ~search_service_client.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" @@ -92,17 +92,17 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -116,10 +116,10 @@ def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -130,8 +130,9 @@ def create_or_update( def delete( self, skillset_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None @@ -139,10 +140,14 @@ def delete( :param skillset_name: The name of the skillset to delete. :type skillset_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. 
+ :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -152,11 +157,6 @@ def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -177,10 +177,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -203,7 +203,7 @@ def get( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Skillset" + # type: (...) -> "models.SearchIndexerSkillset" """Retrieves a skillset in a search service. :param skillset_name: The name of the skillset to retrieve. @@ -211,11 +211,11 @@ def get( :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -251,7 +251,7 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) @@ -326,23 +326,23 @@ def list( def create( self, - skillset, # type: "models.Skillset" + skillset, # type: "models.SearchIndexerSkillset" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) 
-> "models.Skillset" + # type: (...) -> "models.SearchIndexerSkillset" """Creates a new skillset in a search service. :param skillset: The skillset containing one or more skills to create in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~search_service_client.models.SearchIndexerSkillset :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~search_service_client.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None @@ -370,7 +370,7 @@ def create( # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -382,7 +382,7 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py index ff4e69ec5420..ba6819c14159 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py @@ -41,8 +41,9 @@ def create_or_update( self, synonym_map_name, # type: str synonym_map, # type: "models.SynonymMap" + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> "models.SynonymMap" @@ -52,10 +53,14 @@ def create_or_update( :type synonym_map_name: str :param synonym_map: The definition of the synonym map to create or update. :type synonym_map: ~search_service_client.models.SynonymMap + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) :rtype: ~search_service_client.models.SynonymMap or ~search_service_client.models.SynonymMap @@ -65,11 +70,6 @@ def create_or_update( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" @@ -91,10 +91,10 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') @@ -129,8 +129,9 @@ def create_or_update( def delete( self, synonym_map_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None @@ -138,10 +139,14 @@ def delete( :param synonym_map_name: The name of the synonym map to delete. :type synonym_map_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None @@ -151,11 +156,6 @@ def delete( error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" @@ -176,10 +176,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) From e01d6ba6e108aba395c4e741b892b5279b86b15c Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:18:22 -0700 Subject: [PATCH 02/20] Analyzer -> LexicalAnalyzer --- .../azure-search-documents/azure/search/documents/__init__.py | 4 ++-- .../azure/search/documents/_service/_models.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 75893973ec59..0b07826f498c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -43,7 +43,6 @@ edm, ) from ._service._generated.models import ( - Analyzer, AnalyzeRequest, AnalyzeResult, AsciiFoldingTokenFilter, @@ -80,6 +79,7 @@ KeywordTokenizer, LanguageDetectionSkill, LengthTokenFilter, + LexicalAnalyzer, LimitTokenFilter, MagnitudeScoringFunction, MagnitudeScoringParameters, @@ -135,7 +135,6 @@ __all__ = ( "AnalyzeRequest", "AnalyzeResult", - "Analyzer", "AsciiFoldingTokenFilter", "AutocompleteQuery", "AzureActiveDirectoryApplicationCredentials", @@ -175,6 +174,7 @@ "KeywordTokenizer", "LanguageDetectionSkill", "LengthTokenFilter", + "LexicalAnalyzer", "LimitTokenFilter", "MagnitudeScoringFunction", "MagnitudeScoringParameters", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py index 529af151836f..38b9ab6ce726 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py @@ -3,10 +3,10 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
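The hunk below updates the one hand-written subclass to match the Analyzer rename. For user code the change is a straight substitution; a sketch, with the caveat that the constructor fields shown are assumptions mirroring Lucene's PatternAnalyzer rather than anything confirmed by this diff:

    from azure.search.documents._service._generated.models import LexicalAnalyzer
    from azure.search.documents._service._models import PatternAnalyzer

    # PatternAnalyzer keeps its behavior; only its base class name changed.
    analyzer = PatternAnalyzer(
        name="comma_split",       # hypothetical analyzer name
        pattern=",",              # regex the analyzer splits terms on
        lower_case_terms=True,    # assumed field name
    )
    assert isinstance(analyzer, LexicalAnalyzer)
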
# -------------------------------------------------------------------------- -from ._generated.models import Analyzer, Tokenizer +from ._generated.models import LexicalAnalyzer, Tokenizer -class PatternAnalyzer(Analyzer): +class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression. This analyzer is implemented using Apache Lucene. From 742a3c5662ae54b54ac692e755f6aad4914271f3 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:20:32 -0700 Subject: [PATCH 03/20] StandardAnalyzer -> LuceneStandardAnalyzer --- .../azure-search-documents/azure/search/documents/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 0b07826f498c..fd71401db495 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -81,6 +81,7 @@ LengthTokenFilter, LexicalAnalyzer, LimitTokenFilter, + LuceneStandardAnalyzer, MagnitudeScoringFunction, MagnitudeScoringParameters, MappingCharFilter, @@ -104,7 +105,6 @@ Skillset, SnowballTokenFilter, SplitSkill, - StandardAnalyzer, StandardTokenizer, StemmerOverrideTokenFilter, StemmerTokenFilter, @@ -176,6 +176,7 @@ "LengthTokenFilter", "LexicalAnalyzer", "LimitTokenFilter", + "LuceneStandardAnalyzer", "MagnitudeScoringFunction", "MagnitudeScoringParameters", "MappingCharFilter", @@ -207,7 +208,6 @@ "Skillset", "SnowballTokenFilter", "SplitSkill", - "StandardAnalyzer", "StandardTokenizer", "StemmerOverrideTokenFilter", "StemmerTokenFilter", From 299f7b01c0bf9ab49611696dda8552b9b47d05a8 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:21:39 -0700 Subject: [PATCH 04/20] StandardTokenizer -> LuceneStandardTokenizer --- .../azure-search-documents/azure/search/documents/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index fd71401db495..af150bfaf2e5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -82,6 +82,7 @@ LexicalAnalyzer, LimitTokenFilter, LuceneStandardAnalyzer, + LuceneStandardTokenizer, MagnitudeScoringFunction, MagnitudeScoringParameters, MappingCharFilter, @@ -105,7 +106,6 @@ Skillset, SnowballTokenFilter, SplitSkill, - StandardTokenizer, StemmerOverrideTokenFilter, StemmerTokenFilter, StopAnalyzer, @@ -177,6 +177,7 @@ "LexicalAnalyzer", "LimitTokenFilter", "LuceneStandardAnalyzer", + "LuceneStandardTokenizer", "MagnitudeScoringFunction", "MagnitudeScoringParameters", "MappingCharFilter", @@ -208,7 +209,6 @@ "Skillset", "SnowballTokenFilter", "SplitSkill", - "StandardTokenizer", "StemmerOverrideTokenFilter", "StemmerTokenFilter", "StopAnalyzer", From 0f823cd5928236f1c9b0049b82e9ba81bb4171fe Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:29:18 -0700 Subject: [PATCH 05/20] DataSource -> SearchIndexerDataSource --- .../azure/search/documents/__init__.py | 4 +-- .../documents/_service/_datasources_client.py | 28 ++++++++--------- .../_service/aio/_datasources_client.py | 30 +++++++++---------- 3 files changed, 31 insertions(+), 31 deletions(-) diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index af150bfaf2e5..43fa49b20ee1 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -54,7 +54,6 @@ ConditionalSkill, CorsOptions, CustomAnalyzer, - DataSource, DataSourceCredentials, DataContainer, DictionaryDecompounderTokenFilter, @@ -98,6 +97,7 @@ PatternReplaceTokenFilter, PhoneticTokenFilter, RegexFlags, + SearchIndexerDataSource, ScoringFunction, ScoringProfile, SentimentSkill, @@ -146,7 +146,6 @@ "ConditionalSkill", "CorsOptions", "CustomAnalyzer", - "DataSource", "DataSourceCredentials", "DataContainer", "DictionaryDecompounderTokenFilter", @@ -198,6 +197,7 @@ "ScoringFunction", "ScoringProfile", "SearchClient", + "SearchIndexerDataSource", "SearchItemPaged", "SearchQuery", "SearchServiceClient", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py index 83ca286a1f36..5788949389a7 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from ._generated.models import DataSource + from ._generated.models import SearchIndexerDataSource from typing import Any, Dict, Optional, Sequence, Union from azure.core.credentials import AzureKeyCredential @@ -57,12 +57,12 @@ def close(self): @distributed_trace def create_datasource(self, data_source, **kwargs): - # type: (DataSource, **Any) -> Dict[str, Any] + # type: (SearchIndexerDataSource, **Any) -> Dict[str, Any] """Creates a new datasource. :param data_source: The definition of the datasource to create. - :type data_source: ~search.models.DataSource - :return: The created DataSource + :type data_source: ~search.models.SearchIndexerDataSource + :return: The created SearchIndexerDataSource :rtype: dict .. admonition:: Example: @@ -80,15 +80,15 @@ def create_datasource(self, data_source, **kwargs): @distributed_trace def create_or_update_datasource(self, data_source, name=None, **kwargs): - # type: (DataSource, Optional[str], **Any) -> Dict[str, Any] + # type: (SearchIndexerDataSource, Optional[str], **Any) -> Dict[str, Any] """Creates a new datasource or updates a datasource if it already exists. :param name: The name of the datasource to create or update. :type name: str :param data_source: The definition of the datasource to create or update. - :type data_source: ~search.models.DataSource + :type data_source: ~search.models.SearchIndexerDataSource :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions - :return: The created DataSource + :return: The created SearchIndexerDataSource :rtype: dict """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -114,7 +114,7 @@ def get_datasource(self, name, **kwargs): :param name: The name of the datasource to retrieve. :type name: str - :return: The DataSource that is fetched. + :return: The SearchIndexerDataSource that is fetched. :rtype: dict .. 
admonition:: Example: @@ -124,7 +124,7 @@ def get_datasource(self, name, **kwargs): :end-before: [END get_data_source] :language: python :dedent: 4 - :caption: Retrieve a DataSource + :caption: Retrieve a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.data_sources.get(name, **kwargs) @@ -132,7 +132,7 @@ def get_datasource(self, name, **kwargs): @distributed_trace def get_datasources(self, **kwargs): - # type: (**Any) -> Sequence[DataSource] + # type: (**Any) -> Sequence[SearchIndexerDataSource] """Lists all datasources available for a search service. :return: List of all the data sources. @@ -145,7 +145,7 @@ def get_datasources(self, **kwargs): :end-before: [END list_data_source] :language: python :dedent: 4 - :caption: List all the DataSources + :caption: List all the SearchIndexerDataSources """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.data_sources.list(**kwargs) @@ -153,13 +153,13 @@ def get_datasources(self, **kwargs): @distributed_trace def delete_datasource(self, data_source, **kwargs): - # type: (Union[str, DataSource], **Any) -> None + # type: (Union[str, SearchIndexerDataSource], **Any) -> None """Deletes a datasource. To use access conditions, the Datasource model must be provided instead of the name. It is enough to provide the name of the datasource to delete unconditionally :param data_source: The datasource to delete. - :type data_source: str or ~search.models.DataSource + :type data_source: str or ~search.models.SearchIndexerDataSource :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :return: None @@ -172,7 +172,7 @@ def delete_datasource(self, data_source, **kwargs): :end-before: [END delete_data_source] :language: python :dedent: 4 - :caption: Delete a DataSource + :caption: Delete a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py index 65d5dd8ef88a..1f369dfad588 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import DataSource + from .._generated.models import SearchIndexerDataSource from typing import Any, Dict, Optional, Sequence, Union from azure.core.credentials import AzureKeyCredential @@ -57,11 +57,11 @@ async def close(self): @distributed_trace_async async def create_datasource(self, data_source, **kwargs): - # type: (DataSource, **Any) -> Dict[str, Any] + # type: (SearchIndexerDataSource, **Any) -> Dict[str, Any] """Creates a new datasource. :param data_source: The definition of the datasource to create. - :type data_source: ~search.models.DataSource - :return: The created DataSource + :type data_source: ~search.models.SearchIndexerDataSource + :return: The created SearchIndexerDataSource :rtype: dict .. 
admonition:: Example: @@ -71,7 +71,7 @@ async def create_datasource(self, data_source, **kwargs): :end-before: [END create_data_source_async] :language: python :dedent: 4 - :caption: Create a DataSource + :caption: Create a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.data_sources.create(data_source, **kwargs) @@ -79,15 +79,15 @@ async def create_datasource(self, data_source, **kwargs): @distributed_trace_async async def create_or_update_datasource(self, data_source, name=None, **kwargs): - # type: (DataSource, Optional[str], **Any) -> Dict[str, Any] + # type: (SearchIndexerDataSource, Optional[str], **Any) -> Dict[str, Any] """Creates a new datasource or updates a datasource if it already exists. :param name: The name of the datasource to create or update. :type name: str :param data_source: The definition of the datasource to create or update. - :type data_source: ~search.models.DataSource + :type data_source: ~search.models.SearchIndexerDataSource :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions - :return: The created DataSource + :return: The created SearchIndexerDataSource :rtype: dict """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -108,13 +108,13 @@ async def create_or_update_datasource(self, data_source, name=None, **kwargs): @distributed_trace_async async def delete_datasource(self, data_source, **kwargs): - # type: (Union[str, DataSource], **Any) -> None + # type: (Union[str, SearchIndexerDataSource], **Any) -> None """Deletes a datasource. To use access conditions, the Datasource model must be provided instead of the name. It is enough to provide the name of the datasource to delete unconditionally :param data_source: The datasource to delete. - :type data_source: str or ~search.models.DataSource + :type data_source: str or ~search.models.SearchIndexerDataSource :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :return: None @@ -127,7 +127,7 @@ async def delete_datasource(self, data_source, **kwargs): :end-before: [END delete_data_source_async] :language: python :dedent: 4 - :caption: Delete a DataSource + :caption: Delete a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( @@ -152,14 +152,14 @@ async def get_datasource(self, name, **kwargs): :param name: The name of the datasource to retrieve. :type name: str - :return: The DataSource that is fetched. + :return: The SearchIndexerDataSource that is fetched. .. literalinclude:: ../samples/async_samples/sample_data_source_operations_async.py :start-after: [START get_data_source_async] :end-before: [END get_data_source_async] :language: python :dedent: 4 - :caption: Retrieve a DataSource + :caption: Retrieve a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.data_sources.get(name, **kwargs) @@ -167,7 +167,7 @@ async def get_datasource(self, name, **kwargs): @distributed_trace_async async def get_datasources(self, **kwargs): - # type: (**Any) -> Sequence[DataSource] + # type: (**Any) -> Sequence[SearchIndexerDataSource] """Lists all datasources available for a search service. :return: List of all the data sources. 
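End to end, the async client surface works the same as its synchronous sibling, just awaited. A minimal sketch; the client class name and constructor are assumptions based on this file's synchronous counterpart, and the endpoint and key values are placeholders:

    import asyncio

    from azure.core.credentials import AzureKeyCredential

    async def main():
        client = SearchDataSourcesClient(          # assumed class name
            "<service endpoint>", AzureKeyCredential("<api key>")
        )
        data_sources = await client.get_datasources()
        for ds in data_sources:                    # each is a SearchIndexerDataSource
            print(ds.name)
        await client.close()

    asyncio.run(main())
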
@@ -180,7 +180,7 @@ async def get_datasources(self, **kwargs): :end-before: [END list_data_source_async] :language: python :dedent: 4 - :caption: List all DataSources + :caption: List all SearchIndexerDataSources """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.data_sources.list(**kwargs) From 748e517f02eee0979ffe72154f6ab546b0a7e14b Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:31:36 -0700 Subject: [PATCH 06/20] DataContainer -> SearchIndexerDataContainer --- .../azure-search-documents/azure/search/documents/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 43fa49b20ee1..ef1d7fb99d3e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -55,7 +55,6 @@ CorsOptions, CustomAnalyzer, DataSourceCredentials, - DataContainer, DictionaryDecompounderTokenFilter, DistanceScoringFunction, DistanceScoringParameters, @@ -97,6 +96,7 @@ PatternReplaceTokenFilter, PhoneticTokenFilter, RegexFlags, + SearchIndexerDataContainer, SearchIndexerDataSource, ScoringFunction, ScoringProfile, @@ -147,7 +147,6 @@ "CorsOptions", "CustomAnalyzer", "DataSourceCredentials", - "DataContainer", "DictionaryDecompounderTokenFilter", "DistanceScoringFunction", "DistanceScoringParameters", @@ -197,6 +196,7 @@ "ScoringFunction", "ScoringProfile", "SearchClient", + "SearchIndexerDataContainer", "SearchIndexerDataSource", "SearchItemPaged", "SearchQuery", From 520fb79615c20505f47322341c9cd87ff0e0f99a Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:38:27 -0700 Subject: [PATCH 07/20] Skillset -> SearchIndexerSkillset --- .../azure/search/documents/__init__.py | 4 +- .../documents/_service/_skillsets_client.py | 68 +++++++++---------- .../azure/search/documents/_service/_utils.py | 1 - .../_service/aio/_skillsets_client.py | 68 +++++++++---------- 4 files changed, 70 insertions(+), 71 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index ef1d7fb99d3e..df0f49636bb7 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -98,12 +98,12 @@ RegexFlags, SearchIndexerDataContainer, SearchIndexerDataSource, + SearchIndexerSkillset, ScoringFunction, ScoringProfile, SentimentSkill, ShaperSkill, ShingleTokenFilter, - Skillset, SnowballTokenFilter, SplitSkill, StemmerOverrideTokenFilter, @@ -198,6 +198,7 @@ "SearchClient", "SearchIndexerDataContainer", "SearchIndexerDataSource", + "SearchIndexerSkillset", "SearchItemPaged", "SearchQuery", "SearchServiceClient", @@ -206,7 +207,6 @@ "ShaperSkill", "ShingleTokenFilter", "SimpleField", - "Skillset", "SnowballTokenFilter", "SplitSkill", "StemmerOverrideTokenFilter", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py index 5556aa26db03..c9d0d68dfc71 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py @@ -10,7 +10,7 @@ from 
azure.core.exceptions import ClientAuthenticationError, ResourceNotFoundError from ._generated import SearchServiceClient as _SearchServiceClient -from ._generated.models import Skillset +from ._generated.models import SearchIndexerSkillset from ._utils import get_access_conditions from .._headers_mixin import HeadersMixin from .._version import SDK_MONIKER @@ -59,10 +59,10 @@ def close(self): @distributed_trace def get_skillsets(self, **kwargs): - # type: (**Any) -> List[Skillset] - """List the Skillsets in an Azure Search service. + # type: (**Any) -> List[SearchIndexerSkillset] + """List the SearchIndexerSkillsets in an Azure Search service. - :return: List of Skillsets + :return: List of SearchIndexerSkillsets :rtype: list[dict] :raises: ~azure.core.exceptions.HttpResponseError @@ -73,7 +73,7 @@ def get_skillsets(self, **kwargs): :end-before: [END get_skillsets] :language: python :dedent: 4 - :caption: List Skillsets + :caption: List SearchIndexerSkillsets """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -82,12 +82,12 @@ def get_skillsets(self, **kwargs): @distributed_trace def get_skillset(self, name, **kwargs): - # type: (str, **Any) -> Skillset - """Retrieve a named Skillset in an Azure Search service + # type: (str, **Any) -> SearchIndexerSkillset + """Retrieve a named SearchIndexerSkillset in an Azure Search service - :param name: The name of the Skillset to get + :param name: The name of the SearchIndexerSkillset to get :type name: str - :return: The retrieved Skillset + :return: The retrieved SearchIndexerSkillset :rtype: dict :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` @@ -98,7 +98,7 @@ def get_skillset(self, name, **kwargs): :end-before: [END get_skillset] :language: python :dedent: 4 - :caption: Get a Skillset + :caption: Get a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -106,13 +106,13 @@ def get_skillset(self, name, **kwargs): @distributed_trace def delete_skillset(self, skillset, **kwargs): - # type: (Union[str, Skillset], **Any) -> None - """Delete a named Skillset in an Azure Search service. To use access conditions, - the Skillset model must be provided instead of the name. It is enough to provide + # type: (Union[str, SearchIndexerSkillset], **Any) -> None + """Delete a named SearchIndexerSkillset in an Azure Search service. To use access conditions, + the SearchIndexerSkillset model must be provided instead of the name. 
It is enough to provide the name of the skillset to delete unconditionally - :param name: The Skillset to delete - :type name: str or ~search.models.Skillset + :param name: The SearchIndexerSkillset to delete + :type name: str or ~search.models.SearchIndexerSkillset :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions @@ -123,7 +123,7 @@ def delete_skillset(self, skillset, **kwargs): :end-before: [END delete_skillset] :language: python :dedent: 4 - :caption: Delete a Skillset + :caption: Delete a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -139,16 +139,16 @@ def delete_skillset(self, skillset, **kwargs): @distributed_trace def create_skillset(self, name, skills, description, **kwargs): - # type: (str, Sequence[Skill], str, **Any) -> Skillset - """Create a new Skillset in an Azure Search service + # type: (str, Sequence[Skill], str, **Any) -> SearchIndexerSkillset + """Create a new SearchIndexerSkillset in an Azure Search service - :param name: The name of the Skillset to create + :param name: The name of the SearchIndexerSkillset to create :type name: str - :param skills: A list of Skill objects to include in the Skillset + :param skills: A list of Skill objects to include in the SearchIndexerSkillset :type skills: List[Skill]] - :param description: A description for the Skillset + :param description: A description for the SearchIndexerSkillset :type description: Optional[str] - :return: The created Skillset + :return: The created SearchIndexerSkillset :rtype: dict .. admonition:: Example: @@ -158,33 +158,33 @@ def create_skillset(self, name, skills, description, **kwargs): :end-before: [END create_skillset] :language: python :dedent: 4 - :caption: Create a Skillset + :caption: Create a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - skillset = Skillset(name=name, skills=list(skills), description=description) + skillset = SearchIndexerSkillset(name=name, skills=list(skills), description=description) return self._client.skillsets.create(skillset, **kwargs) @distributed_trace def create_or_update_skillset(self, name, **kwargs): - # type: (str, **Any) -> Skillset - """Create a new Skillset in an Azure Search service, or update an + # type: (str, **Any) -> SearchIndexerSkillset + """Create a new SearchIndexerSkillset in an Azure Search service, or update an existing one. The skillset param must be provided to perform the operation with access conditions. - :param name: The name of the Skillset to create or update + :param name: The name of the SearchIndexerSkillset to create or update :type name: str - :keyword skills: A list of Skill objects to include in the Skillset + :keyword skills: A list of Skill objects to include in the SearchIndexerSkillset :type skills: List[Skill] - :keyword description: A description for the Skillset + :keyword description: A description for the SearchIndexerSkillset :type description: Optional[str] - :keyword skillset: A Skillset to create or update. - :type skillset: :class:`~azure.search.documents.Skillset` + :keyword skillset: A SearchIndexerSkillset to create or update. 
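As this docstring spells out, create_or_update_skillset accepts either loose pieces or a whole model. Both conventions in one hedged sketch, where client is a skillsets client instance and entity_skill is some previously built skill object (both assumed, not defined in this diff):

    from azure.core import MatchConditions

    # Convention 1: pass the pieces; the client assembles the
    # SearchIndexerSkillset(name=..., skills=..., description=...) itself.
    client.create_or_update_skillset(
        "hotel-skillset",                     # hypothetical skillset name
        skills=[entity_skill],
        description="extract entities from hotel reviews",
    )

    # Convention 2: pass a fetched model and gate the write on its etag, so a
    # concurrent writer causes a failure instead of a silent overwrite.
    client.create_or_update_skillset(
        "hotel-skillset",
        skillset=fetched_skillset,            # returned earlier by get_skillset()
        match_condition=MatchConditions.IfNotModified,
    )
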
+ :type skillset: :class:`~azure.search.documents.SearchIndexerSkillset` :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions - :return: The created or updated Skillset + :return: The created or updated SearchIndexerSkillset :rtype: dict If a `skillset` is passed in, any optional `skills`, or @@ -204,14 +204,14 @@ def create_or_update_skillset(self, name, **kwargs): skillset, kwargs.pop('match_condition', MatchConditions.Unconditionally) ) - skillset = Skillset.deserialize(skillset.serialize()) + skillset = SearchIndexerSkillset.deserialize(skillset.serialize()) skillset.name = name for param in ("description", "skills"): if param in kwargs: setattr(skillset, param, kwargs.pop(param)) else: - skillset = Skillset( + skillset = SearchIndexerSkillset( name=name, description=kwargs.pop("description", None), skills=kwargs.pop("skills", None), diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py index 6ad76de52769..6e9a1ea13b41 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py @@ -24,7 +24,6 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports from typing import Optional - from ._generated.models import Skillset DELIMITER = "|" diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py index 75bfadfaef47..f6abf6b66b43 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py @@ -10,7 +10,7 @@ from azure.core.tracing.decorator_async import distributed_trace_async from .._generated.aio import SearchServiceClient as _SearchServiceClient -from .._generated.models import Skillset +from .._generated.models import SearchIndexerSkillset from .._utils import get_access_conditions from ..._headers_mixin import HeadersMixin from ..._version import SDK_MONIKER @@ -59,10 +59,10 @@ async def close(self): @distributed_trace_async async def get_skillsets(self, **kwargs): - # type: (**Any) -> List[Skillset] - """List the Skillsets in an Azure Search service. + # type: (**Any) -> List[SearchIndexerSkillset] + """List the SearchIndexerSkillsets in an Azure Search service. 
- :return: List of Skillsets + :return: List of SearchIndexerSkillsets :rtype: list[dict] :raises: ~azure.core.exceptions.HttpResponseError @@ -73,7 +73,7 @@ async def get_skillsets(self, **kwargs): :end-before: [END get_skillsets] :language: python :dedent: 4 - :caption: List Skillsets + :caption: List SearchIndexerSkillsets """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -82,12 +82,12 @@ async def get_skillsets(self, **kwargs): @distributed_trace_async async def get_skillset(self, name, **kwargs): - # type: (str, **Any) -> Skillset - """Retrieve a named Skillset in an Azure Search service + # type: (str, **Any) -> SearchIndexerSkillset + """Retrieve a named SearchIndexerSkillset in an Azure Search service - :param name: The name of the Skillset to get + :param name: The name of the SearchIndexerSkillset to get :type name: str - :return: The retrieved Skillset + :return: The retrieved SearchIndexerSkillset :rtype: dict :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` @@ -98,7 +98,7 @@ async def get_skillset(self, name, **kwargs): :end-before: [END get_skillset] :language: python :dedent: 4 - :caption: Get a Skillset + :caption: Get a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -106,13 +106,13 @@ async def get_skillset(self, name, **kwargs): @distributed_trace_async async def delete_skillset(self, skillset, **kwargs): - # type: (Union[str, Skillset], **Any) -> None - """Delete a named Skillset in an Azure Search service. To use access conditions, - the Skillset model must be provided instead of the name. It is enough to provide + # type: (Union[str, SearchIndexerSkillset], **Any) -> None + """Delete a named SearchIndexerSkillset in an Azure Search service. To use access conditions, + the SearchIndexerSkillset model must be provided instead of the name. It is enough to provide the name of the skillset to delete unconditionally - :param name: The Skillset to delete - :type name: str or ~search.models.Skillset + :param name: The SearchIndexerSkillset to delete + :type name: str or ~search.models.SearchIndexerSkillset :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions @@ -123,7 +123,7 @@ async def delete_skillset(self, skillset, **kwargs): :end-before: [END delete_skillset] :language: python :dedent: 4 - :caption: Delete a Skillset + :caption: Delete a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -139,16 +139,16 @@ async def delete_skillset(self, skillset, **kwargs): @distributed_trace_async async def create_skillset(self, name, skills, description, **kwargs): - # type: (str, Sequence[Skill], str, **Any) -> Skillset - """Create a new Skillset in an Azure Search service + # type: (str, Sequence[Skill], str, **Any) -> SearchIndexerSkillset + """Create a new SearchIndexerSkillset in an Azure Search service - :param name: The name of the Skillset to create + :param name: The name of the SearchIndexerSkillset to create :type name: str - :param skills: A list of Skill objects to include in the Skillset + :param skills: A list of Skill objects to include in the SearchIndexerSkillset :type skills: List[Skill]] - :param description: A description for the Skillset + :param description: A description for the SearchIndexerSkillset :type description: Optional[str] - :return: The created Skillset + :return: The created SearchIndexerSkillset :rtype: dict .. 
admonition:: Example: @@ -158,33 +158,33 @@ async def create_skillset(self, name, skills, description, **kwargs): :end-before: [END create_skillset] :language: python :dedent: 4 - :caption: Create a Skillset + :caption: Create a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - skillset = Skillset(name=name, skills=list(skills), description=description) + skillset = SearchIndexerSkillset(name=name, skills=list(skills), description=description) return await self._client.skillsets.create(skillset, **kwargs) @distributed_trace_async async def create_or_update_skillset(self, name, **kwargs): - # type: (str, **Any) -> Skillset - """Create a new Skillset in an Azure Search service, or update an + # type: (str, **Any) -> SearchIndexerSkillset + """Create a new SearchIndexerSkillset in an Azure Search service, or update an existing one. The skillset param must be provided to perform the operation with access conditions. - :param name: The name of the Skillset to create or update + :param name: The name of the SearchIndexerSkillset to create or update :type name: str - :keyword skills: A list of Skill objects to include in the Skillset + :keyword skills: A list of Skill objects to include in the SearchIndexerSkillset :type skills: List[Skill] - :keyword description: A description for the Skillset + :keyword description: A description for the SearchIndexerSkillset :type description: Optional[str] - :keyword skillset: A Skillset to create or update. - :type skillset: :class:`~azure.search.documents.Skillset` + :keyword skillset: A SearchIndexerSkillset to create or update. + :type skillset: :class:`~azure.search.documents.SearchIndexerSkillset` :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions - :return: The created or updated Skillset + :return: The created or updated SearchIndexerSkillset :rtype: dict If a `skillset` is passed in, any optional `skills`, or @@ -205,14 +205,14 @@ async def create_or_update_skillset(self, name, **kwargs): skillset, kwargs.pop('match_condition', MatchConditions.Unconditionally) ) - skillset = Skillset.deserialize(skillset.serialize()) + skillset = SearchIndexerSkillset.deserialize(skillset.serialize()) skillset.name = name for param in ("description", "skills"): if param in kwargs: setattr(skillset, param, kwargs.pop(param)) else: - skillset = Skillset( + skillset = SearchIndexerSkillset( name=name, description=kwargs.pop("description", None), skills=kwargs.pop("skills", None), From ed51f76c108c13c780d5dcbe7ea3a9cc9c8ee179 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:40:49 -0700 Subject: [PATCH 08/20] Skill -> SearchIndexerSkill --- .../azure/search/documents/_service/_skillsets_client.py | 8 ++++---- .../search/documents/_service/_synonym_maps_client.py | 1 - .../search/documents/_service/aio/_skillsets_client.py | 8 ++++---- .../search/documents/_service/aio/_synonym_maps_client.py | 1 - 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py index c9d0d68dfc71..865d3f7aef39 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py @@ -17,7 +17,7 @@ if TYPE_CHECKING: # 
pylint:disable=unused-import,ungrouped-imports
- from ._generated.models import Skill
+ from ._generated.models import SearchIndexerSkill
from typing import Any, List, Sequence, Union
from azure.core.credentials import AzureKeyCredential
@@ -139,13 +139,13 @@ def delete_skillset(self, skillset, **kwargs):
@distributed_trace
def create_skillset(self, name, skills, description, **kwargs):
- # type: (str, Sequence[Skill], str, **Any) -> SearchIndexerSkillset
+ # type: (str, Sequence[SearchIndexerSkill], str, **Any) -> SearchIndexerSkillset
"""Create a new SearchIndexerSkillset in an Azure Search service
:param name: The name of the SearchIndexerSkillset to create
:type name: str
:param skills: A list of Skill objects to include in the SearchIndexerSkillset
- :type skills: List[Skill]]
+ :type skills: List[SearchIndexerSkill]
:param description: A description for the SearchIndexerSkillset
:type description: Optional[str]
:return: The created SearchIndexerSkillset
:rtype: dict
@@ -177,7 +177,7 @@ def create_or_update_skillset(self, name, **kwargs):
:param name: The name of the SearchIndexerSkillset to create or update
:type name: str
:keyword skills: A list of Skill objects to include in the SearchIndexerSkillset
- :type skills: List[Skill]
+ :type skills: List[SearchIndexerSkill]
:keyword description: A description for the SearchIndexerSkillset
:type description: Optional[str]
:keyword skillset: A SearchIndexerSkillset to create or update.
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py
index d5fe5ab9082a..8a8f259d0035 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py
@@ -16,7 +16,6 @@
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
- from ._generated.models import Skill
from typing import Any, Dict, List, Sequence, Union, Optional
from azure.core.credentials import AzureKeyCredential
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py
index f6abf6b66b43..6282d69b70d5 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py
@@ -17,7 +17,7 @@
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
- from .._generated.models import Skill
+ from .._generated.models import SearchIndexerSkill
from typing import Any, List, Sequence, Union
from azure.core.credentials import AzureKeyCredential
@@ -139,13 +139,13 @@ async def delete_skillset(self, skillset, **kwargs):
@distributed_trace_async
async def create_skillset(self, name, skills, description, **kwargs):
- # type: (str, Sequence[Skill], str, **Any) -> SearchIndexerSkillset
+ # type: (str, Sequence[SearchIndexerSkill], str, **Any) -> SearchIndexerSkillset
"""Create a new SearchIndexerSkillset in an Azure Search service
:param name: The name of the SearchIndexerSkillset to create
:type name: str
:param skills: A list of Skill objects to include in the SearchIndexerSkillset
- :type skills: List[Skill]]
+ :type skills: List[SearchIndexerSkill]
:param description: A description for the SearchIndexerSkillset
:type description: Optional[str]
:return: The
created SearchIndexerSkillset @@ -177,7 +177,7 @@ async def create_or_update_skillset(self, name, **kwargs): :param name: The name of the SearchIndexerSkillset to create or update :type name: str :keyword skills: A list of Skill objects to include in the SearchIndexerSkillset - :type skills: List[Skill] + :type skills: List[SearchIndexerSkill] :keyword description: A description for the SearchIndexerSkillset :type description: Optional[str] :keyword skillset: A SearchIndexerSkillset to create or update. diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py index 7818d6834cd0..87ef99692c63 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py @@ -16,7 +16,6 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import Skill from typing import Any, Dict, List, Sequence, Union, Optional from azure.core.credentials import AzureKeyCredential From 61e36fd00bebd27cd061cbcc8922e41cabdb35aa Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:42:33 -0700 Subject: [PATCH 09/20] TokenInfo -> AnalyzedTokenInfo --- .../azure-search-documents/azure/search/documents/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index df0f49636bb7..ef63ac9450cf 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -45,6 +45,7 @@ from ._service._generated.models import ( AnalyzeRequest, AnalyzeResult, + AnalyzedTokenInfo, AsciiFoldingTokenFilter, AzureActiveDirectoryApplicationCredentials, CharFilter, @@ -118,7 +119,6 @@ TextTranslationSkill, TextWeights, TokenFilter, - TokenInfo, Tokenizer, TruncateTokenFilter, UaxUrlEmailTokenizer, @@ -135,6 +135,7 @@ __all__ = ( "AnalyzeRequest", "AnalyzeResult", + "AnalyzedTokenInfo", "AsciiFoldingTokenFilter", "AutocompleteQuery", "AzureActiveDirectoryApplicationCredentials", @@ -222,7 +223,6 @@ "TextTranslationSkill", "TextWeights", "TokenFilter", - "TokenInfo", "Tokenizer", "TruncateTokenFilter", "UaxUrlEmailTokenizer", From 2523747c312d90ccdaefbba0ef8927ec98b61cdb Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:46:53 -0700 Subject: [PATCH 10/20] EncryptionKey -> SearchResourceEncryptionKey --- .../azure-search-documents/azure/search/documents/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index ef63ac9450cf..f277ede6ad37 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -62,7 +62,6 @@ EdgeNGramTokenFilter, EdgeNGramTokenizer, ElisionTokenFilter, - EncryptionKey, EntityRecognitionSkill, Field, FreshnessScoringFunction, @@ -100,6 +99,7 @@ SearchIndexerDataContainer, SearchIndexerDataSource, SearchIndexerSkillset, + SearchResourceEncryptionKey, ScoringFunction, ScoringProfile, SentimentSkill, @@ -154,7 +154,6 @@ "EdgeNGramTokenFilter", 
"EdgeNGramTokenizer", "ElisionTokenFilter", - "EncryptionKey", "EntityRecognitionSkill", "Field", "FreshnessScoringFunction", @@ -202,6 +201,7 @@ "SearchIndexerSkillset", "SearchItemPaged", "SearchQuery", + "SearchResourceEncryptionKey", "SearchServiceClient", "SearchableField", "SentimentSkill", From e5854d7efccbc3037e0e4f45cc75e13c106d0274 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:48:59 -0700 Subject: [PATCH 11/20] IndexerExecutionInfo -> SearchIndexerStatus --- .../azure/search/documents/_service/_indexers_client.py | 8 ++++---- .../search/documents/_service/aio/_indexers_client.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py index 0d6a6259f4fd..09ad971be1ec 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from ._generated.models import Indexer, IndexerExecutionInfo + from ._generated.models import Indexer, SearchIndexerStatus from typing import Any, Dict, Optional, Sequence from azure.core.credentials import AzureKeyCredential @@ -234,14 +234,14 @@ def reset_indexer(self, name, **kwargs): @distributed_trace def get_indexer_status(self, name, **kwargs): - # type: (str, **Any) -> IndexerExecutionInfo + # type: (str, **Any) -> cSearchIndexerStatus """Get the status of the indexer. :param name: The name of the indexer to fetch the status. :type name: str - :return: IndexerExecutionInfo - :rtype: IndexerExecutionInfo + :return: SearchIndexerStatus + :rtype: SearchIndexerStatus .. admonition:: Example: diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py index c17509beaf96..1b68dc85d338 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import Indexer, IndexerExecutionInfo + from .._generated.models import Indexer, SearchIndexerStatus from typing import Any, Dict, Optional, Sequence from azure.core.credentials import AzureKeyCredential @@ -234,14 +234,14 @@ async def reset_indexer(self, name, **kwargs): @distributed_trace_async async def get_indexer_status(self, name, **kwargs): - # type: (str, **Any) -> IndexerExecutionInfo + # type: (str, **Any) -> SearchIndexerStatus """Get the status of the indexer. :param name: The name of the indexer to fetch the status. :type name: str - :return: IndexerExecutionInfo - :rtype: IndexerExecutionInfo + :return: SearchIndexerStatus + :rtype: SearchIndexerStatus .. 
admonition:: Example:

From 8720cb79355cfd775628ec38da74d3f298108bd0 Mon Sep 17 00:00:00 2001
From: Bryan Van de Ven
Date: Thu, 7 May 2020 16:52:29 -0700
Subject: [PATCH 12/20] Indexer -> SearchIndexer

---
 .../azure/search/documents/__init__.py | 4 +--
 .../documents/_service/_indexers_client.py | 52 +++++++++----------
 .../_service/aio/_indexers_client.py | 42 +++++++--------
 3 files changed, 49 insertions(+), 49 deletions(-)

diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py
index f277ede6ad37..02788af5f895 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py
@@ -69,7 +69,6 @@
GetIndexStatisticsResult,
ImageAnalysisSkill,
Index,
- Indexer,
InputFieldMappingEntry,
KeepTokenFilter,
KeyPhraseExtractionSkill,
@@ -96,6 +95,7 @@
PatternReplaceTokenFilter,
PhoneticTokenFilter,
RegexFlags,
+ SearchIndexer,
SearchIndexerDataContainer,
SearchIndexerDataSource,
SearchIndexerSkillset,
@@ -161,7 +161,6 @@
"GetIndexStatisticsResult",
"ImageAnalysisSkill",
"Index",
- "Indexer",
"IndexAction",
"IndexDocumentsBatch",
"IndexingResult",
@@ -196,6 +195,7 @@
"ScoringFunction",
"ScoringProfile",
"SearchClient",
+ "SearchIndexer",
"SearchIndexerDataContainer",
"SearchIndexerDataSource",
"SearchIndexerSkillset",
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py
index 09ad971be1ec..efaa24a56e60 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py
@@ -15,7 +15,7 @@
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
- from ._generated.models import Indexer, SearchIndexerStatus
+ from ._generated.models import SearchIndexer, SearchIndexerStatus
from typing import Any, Dict, Optional, Sequence
from azure.core.credentials import AzureKeyCredential
@@ -57,13 +57,13 @@ def close(self):
@distributed_trace
def create_indexer(self, indexer, **kwargs):
- # type: (Indexer, **Any) -> Indexer
- """Creates a new Indexers.
+ # type: (SearchIndexer, **Any) -> SearchIndexer
+ """Creates a new SearchIndexer.
:param indexer: The definition of the indexer to create.
- :type indexer: ~~azure.search.documents.Indexer
- :return: The created Indexer
- :rtype: ~azure.search.documents.Indexer
+ :type indexer: ~azure.search.documents.SearchIndexer
+ :return: The created SearchIndexer
+ :rtype: ~azure.search.documents.SearchIndexer
.. admonition:: Example:
@@ -72,7 +72,7 @@ def create_indexer(self, indexer, **kwargs):
:end-before: [END create_indexer]
:language: python
:dedent: 4
- :caption: Create an Indexer
+ :caption: Create a SearchIndexer
"""
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
result = self._client.indexers.create(indexer, **kwargs)
@@ -80,15 +80,15 @@ def create_or_update_indexer(self, indexer, name=None, **kwargs):
- # type: (Indexer, Optional[str], **Any) -> Indexer
+ # type: (SearchIndexer, Optional[str], **Any) -> SearchIndexer
"""Creates a new indexer or updates a indexer if it already exists.
:param name: The name of the indexer to create or update.
:type name: str
:param indexer: The definition of the indexer to create or update.
- :type indexer: ~azure.search.documents.Indexer
- :return: The created Indexer
- :rtype: ~azure.search.documents.Indexer
+ :type indexer: ~azure.search.documents.SearchIndexer
+ :return: The created SearchIndexer
+ :rtype: ~azure.search.documents.SearchIndexer
"""
error_map, access_condition = get_access_conditions(
indexer,
@@ -109,13 +109,13 @@ def get_indexer(self, name, **kwargs):
- # type: (str, **Any) -> Indexer
+ # type: (str, **Any) -> SearchIndexer
"""Retrieves a indexer definition.
:param name: The name of the indexer to retrieve.
:type name: str
- :return: The Indexer that is fetched.
- :rtype: ~azure.search.documents.Indexer
+ :return: The SearchIndexer that is fetched.
+ :rtype: ~azure.search.documents.SearchIndexer
.. admonition:: Example:
@@ -124,7 +124,7 @@ def get_indexer(self, name, **kwargs):
:end-before: [END get_indexer]
:language: python
:dedent: 4
- :caption: Retrieve an Indexer
+ :caption: Retrieve a SearchIndexer
"""
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
result = self._client.indexers.get(name, **kwargs)
@@ -132,10 +132,10 @@ def get_indexers(self, **kwargs):
- # type: (**Any) -> Sequence[Indexer]
+ # type: (**Any) -> Sequence[SearchIndexer]
"""Lists all indexers available for a search service.
- :return: List of all the Indexers.
+ :return: List of all the SearchIndexers.
:rtype: `list[dict]`
.. admonition:: Example:
@@ -145,7 +145,7 @@ def get_indexers(self, **kwargs):
:end-before: [END list_indexer]
:language: python
:dedent: 4
- :caption: List all the Indexers
+ :caption: List all the SearchIndexers
"""
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
result = self._client.indexers.list(**kwargs)
@@ -153,13 +153,13 @@ def delete_indexer(self, indexer, **kwargs):
- # type: (Union[str, Indexer], **Any) -> None
- """Deletes an indexer. To use access conditions, the Indexer model
+ # type: (Union[str, SearchIndexer], **Any) -> None
+ """Deletes an indexer. To use access conditions, the SearchIndexer model
must be provided instead of the name. It is enough to provide
the name of the indexer to delete unconditionally.
:param indexer: The indexer to delete.
- :type indexer: str or ~azure.search.documents.Indexer + :type indexer: str or ~azure.search.documents.SearchIndexer :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions @@ -173,7 +173,7 @@ def delete_indexer(self, indexer, **kwargs): :end-before: [END delete_indexer] :language: python :dedent: 4 - :caption: Delete an Indexer + :caption: Delete a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( @@ -204,7 +204,7 @@ def run_indexer(self, name, **kwargs): :end-before: [END run_indexer] :language: python :dedent: 4 - :caption: Run an Indexer + :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) self._client.indexers.run(name, **kwargs) @@ -227,14 +227,14 @@ def reset_indexer(self, name, **kwargs): :end-before: [END reset_indexer] :language: python :dedent: 4 - :caption: Reset an Indexer's change tracking state + :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) self._client.indexers.reset(name, **kwargs) @distributed_trace def get_indexer_status(self, name, **kwargs): - # type: (str, **Any) -> cSearchIndexerStatus + # type: (str, **Any) -> SearchIndexerStatus """Get the status of the indexer. :param name: The name of the indexer to fetch the status. @@ -250,7 +250,7 @@ def get_indexer_status(self, name, **kwargs): :end-before: [END get_indexer_status] :language: python :dedent: 4 - :caption: Get an Indexer's status + :caption: Get a SearchIndexer's status """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) return self._client.indexers.get_status(name, **kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py index 1b68dc85d338..5a52a012bac9 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import Indexer, SearchIndexerStatus + from .._generated.models import SearchIndexer, SearchIndexerStatus from typing import Any, Dict, Optional, Sequence from azure.core.credentials import AzureKeyCredential @@ -57,12 +57,12 @@ def close(self): @distributed_trace_async async def create_indexer(self, indexer, **kwargs): - # type: (Indexer, **Any) -> Indexer - """Creates a new Indexers. + # type: (SearchIndexer, **Any) -> SearchIndexer + """Creates a new SearchIndexer. :param indexer: The definition of the indexer to create. - :type indexer: ~azure.search.documents.Indexer - :return: The created Indexer + :type indexer: ~azure.search.documents.SearchIndexer + :return: The created SearchIndexer :rtype: dict .. 
admonition:: Example: @@ -72,7 +72,7 @@ async def create_indexer(self, indexer, **kwargs): :end-before: [END create_indexer_async] :language: python :dedent: 4 - :caption: Create an Indexer + :caption: Create a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexers.create(indexer, **kwargs) @@ -80,14 +80,14 @@ async def create_indexer(self, indexer, **kwargs): @distributed_trace_async async def create_or_update_indexer(self, indexer, name=None, **kwargs): - # type: (Indexer, Optional[str], **Any) -> Indexer + # type: (SearchIndexer, Optional[str], **Any) -> SearchIndexer """Creates a new indexer or updates a indexer if it already exists. :param name: The name of the indexer to create or update. :type name: str :param indexer: The definition of the indexer to create or update. - :type indexer: ~azure.search.documents.Indexer - :return: The created Indexer + :type indexer: ~azure.search.documents.SearchIndexer + :return: The created SearchIndexer :rtype: dict """ error_map, access_condition = get_access_conditions( @@ -109,12 +109,12 @@ async def create_or_update_indexer(self, indexer, name=None, **kwargs): @distributed_trace_async async def get_indexer(self, name, **kwargs): - # type: (str, **Any) -> Indexer + # type: (str, **Any) -> SearchIndexer """Retrieves a indexer definition. :param name: The name of the indexer to retrieve. :type name: str - :return: The Indexer that is fetched. + :return: The SearchIndexer that is fetched. :rtype: dict .. admonition:: Example: @@ -124,7 +124,7 @@ async def get_indexer(self, name, **kwargs): :end-before: [END get_indexer_async] :language: python :dedent: 4 - :caption: Retrieve an Indexer + :caption: Retrieve a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexers.get(name, **kwargs) @@ -132,10 +132,10 @@ async def get_indexer(self, name, **kwargs): @distributed_trace_async async def get_indexers(self, **kwargs): - # type: (**Any) -> Sequence[Indexer] + # type: (**Any) -> Sequence[SearchIndexer] """Lists all indexers available for a search service. - :return: List of all the Indexers. + :return: List of all the SearchIndexers. :rtype: `list[dict]` .. admonition:: Example: @@ -145,7 +145,7 @@ async def get_indexers(self, **kwargs): :end-before: [END list_indexer_async] :language: python :dedent: 4 - :caption: List all the Indexers + :caption: List all the SearchIndexers """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexers.list(**kwargs) @@ -153,8 +153,8 @@ async def get_indexers(self, **kwargs): @distributed_trace_async async def delete_indexer(self, indexer, **kwargs): - # type: (Union[str, Indexer], **Any) -> None - """Deletes an indexer. To use access conditions, the Indexer model + # type: (Union[str, SearchIndexer], **Any) -> None + """Deletes an indexer. To use access conditions, the SearchIndexer model must be provided instead of the name. It is enough to provide the name of the indexer to delete unconditionally. 
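To make the rename concrete, here is a minimal usage sketch of the resulting async surface. This is a sketch only: `indexers_client` is assumed to be an instance of the async indexers client defined in _service/aio/_indexers_client.py, and the helper and indexer names are placeholders, not part of this patch. Fetching the full SearchIndexer model first makes its e_tag available, so delete_indexer can honor an etag-based match condition as the docstring above describes:

    from azure.core import MatchConditions

    async def delete_if_unchanged(indexers_client, name):
        # Fetch the SearchIndexer model (not just the name) so it carries an e_tag.
        indexer = await indexers_client.get_indexer(name)
        # Passing the model with IfNotModified deletes only while the service copy
        # still matches the fetched e_tag; passing a bare name deletes unconditionally.
        await indexers_client.delete_indexer(
            indexer, match_condition=MatchConditions.IfNotModified
        )
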
@@ -173,7 +173,7 @@ async def delete_indexer(self, indexer, **kwargs): :end-before: [END delete_indexer_async] :language: python :dedent: 4 - :caption: Delete an Indexer + :caption: Delete a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( @@ -204,7 +204,7 @@ async def run_indexer(self, name, **kwargs): :end-before: [END run_indexer_async] :language: python :dedent: 4 - :caption: Run an Indexer + :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) await self._client.indexers.run(name, **kwargs) @@ -227,7 +227,7 @@ async def reset_indexer(self, name, **kwargs): :end-before: [END reset_indexer_async] :language: python :dedent: 4 - :caption: Reset an Indexer's change tracking state + :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) await self._client.indexers.reset(name, **kwargs) @@ -250,7 +250,7 @@ async def get_indexer_status(self, name, **kwargs): :end-before: [END get_indexer_status_async] :language: python :dedent: 4 - :caption: Get an Indexer's status + :caption: Get a SearchIndexer's status """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) return await self._client.indexers.get_status(name, **kwargs) From dded0d9cf437b217ad0f1227df71d26ab2979000 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 16:56:20 -0700 Subject: [PATCH 13/20] Tokenizer -> LexicalTokenizer --- .../azure-search-documents/azure/search/documents/__init__.py | 4 ++-- .../azure/search/documents/_service/_models.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 02788af5f895..3023f133ce94 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -77,6 +77,7 @@ LanguageDetectionSkill, LengthTokenFilter, LexicalAnalyzer, + LexicalTokenizer, LimitTokenFilter, LuceneStandardAnalyzer, LuceneStandardTokenizer, @@ -119,7 +120,6 @@ TextTranslationSkill, TextWeights, TokenFilter, - Tokenizer, TruncateTokenFilter, UaxUrlEmailTokenizer, UniqueTokenFilter, @@ -172,6 +172,7 @@ "LanguageDetectionSkill", "LengthTokenFilter", "LexicalAnalyzer", + "LexicalTokenizer", "LimitTokenFilter", "LuceneStandardAnalyzer", "LuceneStandardTokenizer", @@ -223,7 +224,6 @@ "TextTranslationSkill", "TextWeights", "TokenFilter", - "Tokenizer", "TruncateTokenFilter", "UaxUrlEmailTokenizer", "UniqueTokenFilter", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py index 38b9ab6ce726..6b2fd3491b35 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- -from ._generated.models import LexicalAnalyzer, Tokenizer +from ._generated.models import LexicalAnalyzer, LexicalTokenizer class PatternAnalyzer(LexicalAnalyzer): @@ -49,7 +49,7 @@ def __init__(self, **kwargs): self.stopwords = kwargs.get("stopwords", None) -class PatternTokenizer(Tokenizer): +class PatternTokenizer(LexicalTokenizer): """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. From 9dafa5e2890f13522b58fbc3098171a194fa870f Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 17:01:27 -0700 Subject: [PATCH 14/20] Field -> SearchField --- .../azure/search/documents/__init__.py | 4 ++-- .../azure/search/documents/_service/_index.py | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 3023f133ce94..6d1c37db67c7 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -63,7 +63,6 @@ EdgeNGramTokenizer, ElisionTokenFilter, EntityRecognitionSkill, - Field, FreshnessScoringFunction, FreshnessScoringParameters, GetIndexStatisticsResult, @@ -96,6 +95,7 @@ PatternReplaceTokenFilter, PhoneticTokenFilter, RegexFlags, + SearchField, SearchIndexer, SearchIndexerDataContainer, SearchIndexerDataSource, @@ -155,7 +155,6 @@ "EdgeNGramTokenizer", "ElisionTokenFilter", "EntityRecognitionSkill", - "Field", "FreshnessScoringFunction", "FreshnessScoringParameters", "GetIndexStatisticsResult", @@ -196,6 +195,7 @@ "ScoringFunction", "ScoringProfile", "SearchClient", + "SearchField", "SearchIndexer", "SearchIndexerDataContainer", "SearchIndexerDataSource", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py index a76d1c1976cb..e8601794c933 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py @@ -6,7 +6,7 @@ from typing import TYPE_CHECKING from .edm import Collection, ComplexType -from ._generated.models import Field +from ._generated.models import SearchField if TYPE_CHECKING: from typing import Any, Dict, List @@ -15,7 +15,7 @@ def SimpleField(**kw): - # type: (**Any) -> Dict[str, Any] + # type: (**Any) -> SearchField """Configure a simple field for an Azure Search Index :param name: Required. The name of the field, which must be unique within the fields collection @@ -67,11 +67,11 @@ def SimpleField(**kw): result["facetable"] = kw.get("facetable", False) result["sortable"] = kw.get("sortable", False) result["retrievable"] = not kw.get("hidden", False) - return Field(**result) + return SearchField(**result) def SearchableField(**kw): - # type: (**Any) -> Dict[str, Any] + # type: (**Any) -> SearchField """Configure a searchable text field for an Azure Search Index :param name: Required. 
The name of the field, which must be unique within the fields collection @@ -204,11 +204,11 @@ def SearchableField(**kw): result["index_analyzer"] = kw["index_analyzer"] if "synonym_maps" in kw: result["synonym_maps"] = kw["synonym_maps"] - return Field(**result) + return SearchField(**result) def ComplexField(**kw): - # type: (**Any) -> Dict[str, Any] + # type: (**Any) -> SearchField """Configure a Complex or Complex collection field for an Azure Search Index @@ -225,4 +225,4 @@ def ComplexField(**kw): typ = Collection(ComplexType) if kw.get("collection", False) else ComplexType result = {"name": kw.get("name"), "type": typ} # type: Dict[str, Any] result["fields"] = kw.get("fields") - return Field(**result) + return SearchField(**result) From 80b84f1709866eb94c742cf5b78d1b15b4964e9f Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Thu, 7 May 2020 17:07:13 -0700 Subject: [PATCH 15/20] Index -> SearchIndex --- .../azure/search/documents/__init__.py | 4 +-- .../documents/_service/_indexes_client.py | 28 +++++++++---------- .../azure/search/documents/_service/_utils.py | 6 ++-- .../documents/_service/aio/_indexes_client.py | 28 +++++++++---------- 4 files changed, 33 insertions(+), 33 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 6d1c37db67c7..7576aefad901 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -67,7 +67,6 @@ FreshnessScoringParameters, GetIndexStatisticsResult, ImageAnalysisSkill, - Index, InputFieldMappingEntry, KeepTokenFilter, KeyPhraseExtractionSkill, @@ -96,6 +95,7 @@ PhoneticTokenFilter, RegexFlags, SearchField, + SearchIndex, SearchIndexer, SearchIndexerDataContainer, SearchIndexerDataSource, @@ -159,7 +159,6 @@ "FreshnessScoringParameters", "GetIndexStatisticsResult", "ImageAnalysisSkill", - "Index", "IndexAction", "IndexDocumentsBatch", "IndexingResult", @@ -196,6 +195,7 @@ "ScoringProfile", "SearchClient", "SearchField", + "SearchIndex", "SearchIndexer", "SearchIndexerDataContainer", "SearchIndexerDataSource", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py index 355cffc065d9..ec7dde364c98 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py @@ -20,7 +20,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from ._generated.models import AnalyzeRequest, AnalyzeResult, Index + from ._generated.models import AnalyzeRequest, AnalyzeResult, SearchIndex from typing import Any, Dict, List, Union from azure.core.credentials import AzureKeyCredential @@ -62,11 +62,11 @@ def close(self): @distributed_trace def list_indexes(self, **kwargs): - # type: (**Any) -> ItemPaged[Index] + # type: (**Any) -> ItemPaged[SearchIndex] """List the indexes in an Azure Search service. 
:return: List of indexes - :rtype: list[~azure.search.documents.Index] + :rtype: list[~azure.search.documents.SearchIndex] :raises: ~azure.core.exceptions.HttpResponseError """ @@ -82,13 +82,13 @@ def extract_data(response): @distributed_trace def get_index(self, index_name, **kwargs): - # type: (str, **Any) -> Index + # type: (str, **Any) -> SearchIndex """ :param index_name: The name of the index to retrieve. :type index_name: str - :return: Index object - :rtype: ~azure.search.documents.Index + :return: SearchIndex object + :rtype: ~azure.search.documents.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError .. admonition:: Example: @@ -123,12 +123,12 @@ def get_index_statistics(self, index_name, **kwargs): @distributed_trace def delete_index(self, index, **kwargs): - # type: (Union[str, Index], **Any) -> None + # type: (Union[str, SearchIndex], **Any) -> None """Deletes a search index and all the documents it contains. The model must be provided instead of the name to use the access conditions. :param index: The index to retrieve. - :type index: str or ~search.models.Index + :type index: str or ~search.models.SearchIndex :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :raises: ~azure.core.exceptions.HttpResponseError @@ -160,13 +160,13 @@ def delete_index(self, index, **kwargs): @distributed_trace def create_index(self, index, **kwargs): - # type: (Index, **Any) -> Index + # type: (SearchIndex, **Any) -> SearchIndex """Creates a new search index. :param index: The index object. - :type index: ~azure.search.documents.Index + :type index: ~azure.search.documents.SearchIndex :return: The index created - :rtype: ~azure.search.documents.Index + :rtype: ~azure.search.documents.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError .. admonition:: Example: @@ -191,13 +191,13 @@ def create_or_update_index( allow_index_downtime=None, **kwargs ): - # type: (str, Index, bool, **Any) -> Index + # type: (str, SearchIndex, bool, **Any) -> SearchIndex """Creates a new search index or updates an index if it already exists. :param index_name: The name of the index. :type index_name: str :param index: The index object. - :type index: ~azure.search.documents.Index + :type index: ~azure.search.documents.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. 
Performance and write availability of @@ -207,7 +207,7 @@ def create_or_update_index( :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :return: The index created or updated - :rtype: :class:`~azure.search.documents.Index` + :rtype: :class:`~azure.search.documents.SearchIndex` :raises: :class:`~azure.core.exceptions.ResourceNotFoundError`, \ :class:`~azure.core.exceptions.ResourceModifiedError`, \ :class:`~azure.core.exceptions.ResourceNotModifiedError`, \ diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py index 6e9a1ea13b41..b0ea5311ec36 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py @@ -14,7 +14,7 @@ ResourceNotModifiedError, ) from ._generated.models import ( - Index, + SearchIndex, PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, AccessCondition @@ -117,7 +117,7 @@ def listize_flags_for_pattern_tokenizer(pattern_tokenizer): def delistize_flags_for_index(index): - # type: (Index) -> Index + # type: (SearchIndex) -> SearchIndex if index.analyzers: index.analyzers = [ delistize_flags_for_pattern_analyzer(x) # type: ignore @@ -136,7 +136,7 @@ def delistize_flags_for_index(index): def listize_flags_for_index(index): - # type: (Index) -> Index + # type: (SearchIndex) -> SearchIndex if index.analyzers: index.analyzers = [ listize_flags_for_pattern_analyzer(x) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py index c7571ef63566..2a5fc67e7d5e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py @@ -19,7 +19,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import AnalyzeRequest, AnalyzeResult, Index + from .._generated.models import AnalyzeRequest, AnalyzeResult, SearchIndex from typing import Any, Dict, List, Union from azure.core.credentials import AzureKeyCredential @@ -61,11 +61,11 @@ async def close(self): @distributed_trace_async async def list_indexes(self, **kwargs): - # type: (**Any) -> AsyncItemPaged[Index] + # type: (**Any) -> AsyncItemPaged[SearchIndex] """List the indexes in an Azure Search service. :return: List of indexes - :rtype: list[~azure.search.documents.Index] + :rtype: list[~azure.search.documents.SearchIndex] :raises: ~azure.core.exceptions.HttpResponseError """ @@ -81,13 +81,13 @@ async def extract_data(response): @distributed_trace_async async def get_index(self, index_name, **kwargs): - # type: (str, **Any) -> Index + # type: (str, **Any) -> SearchIndex """ :param index_name: The name of the index to retrieve. :type index_name: str - :return: Index object - :rtype: ~azure.search.documents.Index + :return: SearchIndex object + :rtype: ~azure.search.documents.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError .. 
admonition:: Example: @@ -122,12 +122,12 @@ async def get_index_statistics(self, index_name, **kwargs): @distributed_trace_async async def delete_index(self, index, **kwargs): - # type: (Union[str, Index], **Any) -> None + # type: (Union[str, SearchIndex], **Any) -> None """Deletes a search index and all the documents it contains. The model must be provided instead of the name to use the access conditions :param index: The index to retrieve. - :type index: str or ~search.models.Index + :type index: str or ~search.models.SearchIndex :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :raises: ~azure.core.exceptions.HttpResponseError @@ -159,13 +159,13 @@ async def delete_index(self, index, **kwargs): @distributed_trace_async async def create_index(self, index, **kwargs): - # type: (Index, **Any) -> Index + # type: (SearchIndex, **Any) -> SearchIndex """Creates a new search index. :param index: The index object. - :type index: ~azure.search.documents.Index + :type index: ~azure.search.documents.SearchIndex :return: The index created - :rtype: ~azure.search.documents.Index + :rtype: ~azure.search.documents.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError .. admonition:: Example: @@ -190,13 +190,13 @@ async def create_or_update_index( allow_index_downtime=None, **kwargs ): - # type: (str, Index, bool, MatchConditions, **Any) -> Index + # type: (str, SearchIndex, bool, MatchConditions, **Any) -> SearchIndex """Creates a new search index or updates an index if it already exists. :param index_name: The name of the index. :type index_name: str :param index: The index object. - :type index: ~azure.search.documents.Index + :type index: ~azure.search.documents.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. 
Performance and write availability of @@ -206,7 +206,7 @@ async def create_or_update_index( :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :return: The index created or updated - :rtype: :class:`~azure.search.documents.Index` + :rtype: :class:`~azure.search.documents.SearchIndex` :raises: :class:`~azure.core.exceptions.ResourceNotFoundError`, \ :class:`~azure.core.exceptions.ResourceModifiedError`, \ :class:`~azure.core.exceptions.ResourceNotModifiedError`, \ From 9dfc8e62cce3101483aaf21f1f6bfc2171108c72 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Fri, 8 May 2020 13:23:58 -0700 Subject: [PATCH 16/20] update for AccessCondition --- .../documents/_service/_datasources_client.py | 14 ++++------- .../documents/_service/_indexers_client.py | 19 ++++++-------- .../documents/_service/_indexes_client.py | 24 ++++++------------ .../documents/_service/_skillsets_client.py | 25 ++++++++----------- .../_service/_synonym_maps_client.py | 14 ++++------- .../azure/search/documents/_service/_utils.py | 11 +++----- .../_service/aio/_datasources_client.py | 14 ++++------- .../_service/aio/_indexers_client.py | 19 ++++++-------- .../documents/_service/aio/_indexes_client.py | 24 ++++++------------ .../_service/aio/_skillsets_client.py | 25 ++++++++----------- .../_service/aio/_synonym_maps_client.py | 14 ++++------- 11 files changed, 74 insertions(+), 129 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py index 5788949389a7..0f04fe5407f8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py @@ -93,15 +93,14 @@ def create_or_update_datasource(self, data_source, name=None, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - data_source, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) if not name: name = data_source.name result = self._client.data_sources.create_or_update( data_source_name=name, data_source=data_source, - access_condition=access_condition, error_map=error_map, **kwargs ) @@ -176,16 +175,13 @@ def delete_datasource(self, data_source, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - data_source, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = data_source.name except AttributeError: name = data_source self._client.data_sources.delete( - data_source_name=name, - access_condition=access_condition, - error_map=error_map, - **kwargs + data_source_name=name, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py index efaa24a56e60..38d1d81ff7bb 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py +++ 
b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py
@@ -90,20 +90,15 @@ def create_or_update_indexer(self, indexer, name=None, **kwargs):
:return: The created SearchIndexer
:rtype: ~azure.search.documents.SearchIndexer
"""
+ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
error_map, access_condition = get_access_conditions(
- indexer,
- kwargs.pop('match_condition', MatchConditions.Unconditionally)
+ indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally)
)
- kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
-
+ kwargs.update(access_condition)
if not name:
name = indexer.name
result = self._client.indexers.create_or_update(
- indexer_name=name,
- indexer=indexer,
- access_condition=access_condition,
- error_map=error_map,
- **kwargs
+ indexer_name=name, indexer=indexer, error_map=error_map, **kwargs
)
return result
@@ -177,14 +172,14 @@ def delete_indexer(self, indexer, **kwargs):
"""
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
error_map, access_condition = get_access_conditions(
- indexer,
- kwargs.pop('match_condition', MatchConditions.Unconditionally)
+ indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally)
)
+ kwargs.update(access_condition)
try:
name = indexer.name
except AttributeError:
name = indexer
- self._client.indexers.delete(name, access_condition=access_condition, error_map=error_map, **kwargs)
+ self._client.indexers.delete(name, error_map=error_map, **kwargs)
@distributed_trace
def run_indexer(self, name, **kwargs):
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py
index ec7dde364c98..bd65a30a3b87 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py
@@ -13,7 +13,7 @@
from ._utils import (
delistize_flags_for_index,
listize_flags_for_index,
- get_access_conditions
+ get_access_conditions,
)
from .._headers_mixin import HeadersMixin
from .._version import SDK_MONIKER
@@ -144,18 +144,15 @@ def delete_index(self, index, **kwargs):
"""
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
error_map, access_condition = get_access_conditions(
- index,
- kwargs.pop('match_condition', MatchConditions.Unconditionally)
+ index, kwargs.pop("match_condition", MatchConditions.Unconditionally)
)
+ kwargs.update(access_condition)
try:
index_name = index.name
except AttributeError:
index_name = index
self._client.indexes.delete(
- index_name=index_name,
- access_condition=access_condition,
- error_map=error_map,
- **kwargs
+ index_name=index_name, error_map=error_map, **kwargs
)
@distributed_trace
@@ -185,11 +182,7 @@ def create_index(self, index, **kwargs):
@distributed_trace
def create_or_update_index(
- self,
- index_name,
- index,
- allow_index_downtime=None,
- **kwargs
+ self, index_name, index, allow_index_downtime=None, **kwargs
):
# type: (str, SearchIndex, bool, **Any) -> SearchIndex
"""Creates a new search index or updates an index if it already exists.
@@ -223,17 +216,16 @@ def create_or_update_index(
:dedent: 4
:caption: Update an index.
""" + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - index, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) + kwargs.update(access_condition) patched_index = delistize_flags_for_index(index) result = self._client.indexes.create_or_update( index_name=index_name, index=patched_index, allow_index_downtime=allow_index_downtime, - access_condition=access_condition, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py index 865d3f7aef39..c02a78bfe99b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py @@ -128,14 +128,14 @@ def delete_skillset(self, skillset, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - skillset, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = skillset.name except AttributeError: name = skillset - self._client.skillsets.delete(name, access_condition=access_condition, error_map=error_map, **kwargs) + self._client.skillsets.delete(name, error_map=error_map, **kwargs) @distributed_trace def create_skillset(self, name, skills, description, **kwargs): @@ -163,7 +163,9 @@ def create_skillset(self, name, skills, description, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - skillset = SearchIndexerSkillset(name=name, skills=list(skills), description=description) + skillset = SearchIndexerSkillset( + name=name, skills=list(skills), description=description + ) return self._client.skillsets.create(skillset, **kwargs) @@ -192,18 +194,15 @@ def create_or_update_skillset(self, name, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError - } + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} access_condition = None if "skillset" in kwargs: skillset = kwargs.pop("skillset") error_map, access_condition = get_access_conditions( - skillset, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) skillset = SearchIndexerSkillset.deserialize(skillset.serialize()) skillset.name = name for param in ("description", "skills"): @@ -218,9 +217,5 @@ def create_or_update_skillset(self, name, **kwargs): ) return self._client.skillsets.create_or_update( - skillset_name=name, - skillset=skillset, - access_condition=access_condition, - error_map=error_map, - **kwargs + skillset_name=name, skillset=skillset, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py index 8a8f259d0035..965f173c666a 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py @@ -129,18 +129,15 @@ def delete_synonym_map(self, synonym_map, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name except AttributeError: name = synonym_map self._client.synonym_maps.delete( - synonym_map_name=name, - access_condition=access_condition, - error_map=error_map, - **kwargs + synonym_map_name=name, error_map=error_map, **kwargs ) @distributed_trace @@ -189,9 +186,9 @@ def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name if synonyms: @@ -203,7 +200,6 @@ def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwargs): result = self._client.synonym_maps.create_or_update( synonym_map_name=name, synonym_map=synonym_map, - access_condition=access_condition, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py index b0ea5311ec36..8b98feb73502 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py @@ -17,7 +17,6 @@ SearchIndex, PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, - AccessCondition ) from ._models import PatternAnalyzer, PatternTokenizer @@ -159,12 +158,10 @@ def listize_synonyms(synonym_map): synonym_map["synonyms"] = synonym_map["synonyms"].split("\n") return synonym_map + def get_access_conditions(model, match_condition=MatchConditions.Unconditionally): - # type: (Any, MatchConditions) -> Tuple[Dict[int, Any], AccessCondition] - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError - } + # type: (Any, MatchConditions) -> Tuple[Dict[int, Any], Dict[str, Any]] + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} if isinstance(model, six.string_types): if match_condition is not MatchConditions.Unconditionally: @@ -183,6 +180,6 @@ def get_access_conditions(model, match_condition=MatchConditions.Unconditionally error_map[412] = ResourceNotFoundError if match_condition == MatchConditions.IfMissing: error_map[412] = ResourceExistsError - return (error_map, AccessCondition(if_match=if_match, if_none_match=if_none_match)) + return (error_map, dict(if_match=if_match, if_none_match=if_none_match)) except AttributeError: raise ValueError("Unable to get e_tag from the model") diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py index 1f369dfad588..25b6ccf36c40 100644 ---
a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py @@ -92,15 +92,14 @@ async def create_or_update_datasource(self, data_source, name=None, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - data_source, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) if not name: name = data_source.name result = await self._client.data_sources.create_or_update( data_source_name=name, data_source=data_source, - access_condition=access_condition, error_map=error_map, **kwargs ) @@ -131,18 +130,15 @@ async def delete_datasource(self, data_source, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - data_source, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = data_source.name except AttributeError: name = data_source await self._client.data_sources.delete( - data_source_name=name, - access_condition=access_condition, - error_map=error_map, - **kwargs + data_source_name=name, error_map=error_map, **kwargs ) @distributed_trace_async diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py index 5a52a012bac9..68d4963c2cb0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py @@ -90,20 +90,15 @@ async def create_or_update_indexer(self, indexer, name=None, **kwargs): :return: The created SearchIndexer :rtype: ~azure.search.documents.SearchIndexer """ + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - indexer, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - + kwargs.update(access_condition) if not name: name = indexer.name result = await self._client.indexers.create_or_update( - indexer_name=name, - indexer=indexer, - access_condition=access_condition, - error_map=error_map, - **kwargs + indexer_name=name, indexer=indexer, error_map=error_map, **kwargs ) return result @@ -177,14 +172,14 @@ async def delete_indexer(self, indexer, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - indexer, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = indexer.name except AttributeError: name = indexer - await self._client.indexers.delete(name, access_condition=access_condition, error_map=error_map, **kwargs) + await self._client.indexers.delete(name, error_map=error_map, **kwargs) @distributed_trace_async async def run_indexer(self, name, **kwargs): diff --git
a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py index 2a5fc67e7d5e..19cdd6bc2daf 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py @@ -12,7 +12,7 @@ from .._utils import ( delistize_flags_for_index, listize_flags_for_index, - get_access_conditions + get_access_conditions, ) from ..._headers_mixin import HeadersMixin from ..._version import SDK_MONIKER @@ -143,18 +143,15 @@ async def delete_index(self, index, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - index, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: index_name = index.name except AttributeError: index_name = index await self._client.indexes.delete( - index_name=index_name, - access_condition=access_condition, - error_map=error_map, - **kwargs + index_name=index_name, error_map=error_map, **kwargs ) @distributed_trace_async @@ -184,11 +181,7 @@ async def create_index(self, index, **kwargs): @distributed_trace_async async def create_or_update_index( - self, - index_name, - index, - allow_index_downtime=None, - **kwargs + self, index_name, index, allow_index_downtime=None, **kwargs ): # type: (str, SearchIndex, bool, **Any) -> SearchIndex """Creates a new search index or updates an index if it already exists. @@ -222,17 +215,16 @@ async def create_or_update_index( :dedent: 4 :caption: Update an index.
""" + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - index, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) + kwargs.update(access_condition) patched_index = delistize_flags_for_index(index) result = await self._client.indexes.create_or_update( index_name=index_name, index=patched_index, allow_index_downtime=allow_index_downtime, - access_condition=access_condition, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py index 6282d69b70d5..a53f74508589 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py @@ -128,14 +128,14 @@ async def delete_skillset(self, skillset, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - skillset, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = skillset.name except AttributeError: name = skillset - await self._client.skillsets.delete(name, access_condition=access_condition, error_map=error_map, **kwargs) + await self._client.skillsets.delete(name, error_map=error_map, **kwargs) @distributed_trace_async async def create_skillset(self, name, skills, description, **kwargs): @@ -163,7 +163,9 @@ async def create_skillset(self, name, skills, description, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - skillset = SearchIndexerSkillset(name=name, skills=list(skills), description=description) + skillset = SearchIndexerSkillset( + name=name, skills=list(skills), description=description + ) return await self._client.skillsets.create(skillset, **kwargs) @@ -193,18 +195,15 @@ async def create_or_update_skillset(self, name, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError - } + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} access_condition = None if "skillset" in kwargs: skillset = kwargs.pop("skillset") error_map, access_condition = get_access_conditions( - skillset, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) skillset = SearchIndexerSkillset.deserialize(skillset.serialize()) skillset.name = name for param in ("description", "skills"): @@ -219,9 +218,5 @@ async def create_or_update_skillset(self, name, **kwargs): ) return await self._client.skillsets.create_or_update( - skillset_name=name, - skillset=skillset, - access_condition=access_condition, - error_map=error_map, - **kwargs + skillset_name=name, skillset=skillset, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py index 87ef99692c63..a3d6f88dead0 100644 
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py @@ -130,18 +130,15 @@ async def delete_synonym_map(self, synonym_map, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name except AttributeError: name = synonym_map await self._client.synonym_maps.delete( - synonym_map_name=name, - access_condition=access_condition, - error_map=error_map, - **kwargs + synonym_map_name=name, error_map=error_map, **kwargs ) @distributed_trace_async @@ -190,9 +187,9 @@ async def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwarg """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name if synonyms: @@ -204,7 +201,6 @@ async def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwarg result = await self._client.synonym_maps.create_or_update( synonym_map_name=name, synonym_map=synonym_map, - access_condition=access_condition, error_map=error_map, **kwargs ) From 5e4e27ebff7e7f0fdd2ace7626caab6e830627ee Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Fri, 8 May 2020 13:46:39 -0700 Subject: [PATCH 17/20] update tests --- .../azure/search/documents/_service/_utils.py | 2 +- .../_service/aio/_indexers_client.py | 14 +++--- .../async_tests/test_service_live_async.py | 43 +++++++++---------- .../tests/test_regex_flags.py | 12 +++--- .../tests/test_service_live.py | 43 +++++++++---------- 5 files changed, 56 insertions(+), 58 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py index 8b98feb73502..c467ff0b0116 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py @@ -166,7 +166,7 @@ def get_access_conditions(model, match_condition=MatchConditions.Unconditionally if isinstance(model, six.string_types): if match_condition is not MatchConditions.Unconditionally: raise ValueError("A model must be passed to use access conditions") - return (error_map, None) + return (error_map, {}) try: if_match = prep_if_match(model.e_tag, match_condition) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py index 68d4963c2cb0..cba6743bffcf 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py @@ -39,21 +39,21 @@ def __init__(self, endpoint, credential, **kwargs): endpoint=endpoint, sdk_moniker=SDK_MONIKER, **kwargs ) # type: _SearchServiceClient - def __enter__(self): + async def __aenter__(self): # type: () -> SearchIndexersClient - 
self._client.__enter__() # pylint:disable=no-member + await self._client.__aenter__() # pylint:disable=no-member return self - def __exit__(self, *args): + async def __aexit__(self, *args): # type: (*Any) -> None - return self._client.__exit__(*args) # pylint:disable=no-member + return await self._client.__aexit__(*args) # pylint:disable=no-member - def close(self): + async def close(self): # type: () -> None - """Close the :class:`~azure.search.documents.SearchIndexersClient` session. + """Close the :class:`~azure.search.documents.aio.SearchIndexersClient` session. """ - return self._client.close() + return await self._client.close() @distributed_trace_async async def create_indexer(self, indexer, **kwargs): diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py index d9c8d18700f4..60994ffe248f 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py @@ -24,17 +24,16 @@ AnalyzeResult, CorsOptions, EntityRecognitionSkill, - Field, - Index, + SearchIndex, InputFieldMappingEntry, OutputFieldMappingEntry, SearchServiceClient, ScoringProfile, - Skillset, + SearchIndexerSkillset, DataSourceCredentials, - DataSource, - DataContainer, - Indexer, + SearchIndexerDataSource, + SearchIndexerDataContainer, + SearchIndexer, SynonymMap, SimpleField, edm @@ -143,7 +142,7 @@ async def test_delete_indexes_if_unchanged(self, api_key, endpoint, index_name, scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -173,7 +172,7 @@ async def test_create_index(self, api_key, endpoint, index_name, **kwargs): scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -196,7 +195,7 @@ async def test_create_or_update_index(self, api_key, endpoint, index_name, **kwa cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) scoring_profiles = [] - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -211,7 +210,7 @@ async def test_create_or_update_index(self, api_key, endpoint, index_name, **kwa ) scoring_profiles = [] scoring_profiles.append(scoring_profile) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -245,7 +244,7 @@ async def test_create_or_update_indexes_if_unchanged(self, api_key, endpoint, in scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -379,7 +378,7 @@ async def test_create_skillset(self, api_key, endpoint, index_name, **kwargs): outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")]) result = await client.create_skillset(name='test-ss', skills=[s], description="desc") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -430,7 +429,7 @@ async def 
test_get_skillset(self, api_key, endpoint, index_name, **kwargs): assert len(await client.get_skillsets()) == 1 result = await client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -448,7 +447,7 @@ async def test_get_skillsets(self, api_key, endpoint, index_name, **kwargs): await client.create_skillset(name='test-ss-2', skills=[s], description="desc2") result = await client.get_skillsets() assert isinstance(result, list) - assert all(isinstance(x, Skillset) for x in result) + assert all(isinstance(x, SearchIndexerSkillset) for x in result) assert set(x.name for x in result) == {"test-ss-1", "test-ss-2"} @SearchResourceGroupPreparer(random_name_enabled=True) @@ -463,7 +462,7 @@ async def test_create_or_update_skillset(self, api_key, endpoint, index_name, ** assert len(await client.get_skillsets()) == 1 result = await client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -479,7 +478,7 @@ async def test_create_or_update_skillset_inplace(self, api_key, endpoint, index_ assert len(await client.get_skillsets()) == 1 result = await client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -505,8 +504,8 @@ class SearchDataSourcesClientTest(AzureMgmtTestCase): def _create_datasource(self, name="sample-datasource"): credentials = DataSourceCredentials(connection_string=CONNECTION_STRING) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=name, type="azureblob", credentials=credentials, @@ -611,8 +610,8 @@ async def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_na con_str = self.settings.AZURE_STORAGE_CONNECTION_STRING self.scrubber.register_name_pair(con_str, 'connection_string') credentials = DataSourceCredentials(connection_string=con_str) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=ds_name, type="azureblob", credentials=credentials, @@ -630,10 +629,10 @@ async def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_na "key": True, "searchable": False }] - index = Index(name=index_name, fields=fields) + index = SearchIndex(name=index_name, fields=fields) ind_client = client.get_indexes_client() ind = await ind_client.create_index(index) - return Indexer(name=name, data_source_name=ds.name, target_index_name=ind.name) + return SearchIndexer(name=name, data_source_name=ds.name, target_index_name=ind.name) @SearchResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) diff --git a/sdk/search/azure-search-documents/tests/test_regex_flags.py b/sdk/search/azure-search-documents/tests/test_regex_flags.py index 6474bbed6c6d..26775607d3b7 100644 --- a/sdk/search/azure-search-documents/tests/test_regex_flags.py +++ b/sdk/search/azure-search-documents/tests/test_regex_flags.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. 
# ------------------------------------ -from azure.search.documents import Index, RegexFlags, PatternAnalyzer, PatternTokenizer +from azure.search.documents import SearchIndex, RegexFlags, PatternAnalyzer, PatternTokenizer from azure.search.documents._service._generated.models import ( PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, @@ -23,7 +23,7 @@ def test_listize_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -50,7 +50,7 @@ def test_listize_multi_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -79,7 +79,7 @@ def test_listize_flags_for_index_enum(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -106,7 +106,7 @@ def test_delistize_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -133,7 +133,7 @@ def test_delistize_multi_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, diff --git a/sdk/search/azure-search-documents/tests/test_service_live.py b/sdk/search/azure-search-documents/tests/test_service_live.py index 947ce3a85c88..08638d837af4 100644 --- a/sdk/search/azure-search-documents/tests/test_service_live.py +++ b/sdk/search/azure-search-documents/tests/test_service_live.py @@ -21,17 +21,16 @@ AnalyzeResult, CorsOptions, EntityRecognitionSkill, - Field, - Index, + SearchIndex, InputFieldMappingEntry, OutputFieldMappingEntry, SearchServiceClient, ScoringProfile, - Skillset, + SearchIndexerSkillset, DataSourceCredentials, - DataSource, - Indexer, - DataContainer, + SearchIndexerDataSource, + SearchIndexer, + SearchIndexerDataContainer, SynonymMap, SimpleField, edm @@ -126,7 +125,7 @@ def test_delete_indexes_if_unchanged(self, api_key, endpoint, index_name, **kwar scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -155,7 +154,7 @@ def test_create_index(self, api_key, endpoint, index_name, **kwargs): scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -177,7 +176,7 @@ def test_create_or_update_index(self, api_key, endpoint, index_name, **kwargs): ] cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) scoring_profiles = [] - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -192,7 +191,7 @@ def test_create_or_update_index(self, api_key, endpoint, index_name, **kwargs): ) scoring_profiles = [] scoring_profiles.append(scoring_profile) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -226,7 +225,7 @@ def test_create_or_update_indexes_if_unchanged(self, api_key, endpoint, index_na scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = 
Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -378,7 +377,7 @@ def test_create_skillset(self, api_key, endpoint, index_name, **kwargs): outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")]) result = client.create_skillset(name='test-ss', skills=[s], description="desc") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -427,7 +426,7 @@ def test_get_skillset(self, api_key, endpoint, index_name, **kwargs): assert len(client.get_skillsets()) == 1 result = client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -445,7 +444,7 @@ def test_get_skillsets(self, api_key, endpoint, index_name, **kwargs): client.create_skillset(name='test-ss-2', skills=[s], description="desc2") result = client.get_skillsets() assert isinstance(result, list) - assert all(isinstance(x, Skillset) for x in result) + assert all(isinstance(x, SearchIndexerSkillset) for x in result) assert set(x.name for x in result) == {"test-ss-1", "test-ss-2"} @SearchResourceGroupPreparer(random_name_enabled=True) @@ -460,7 +459,7 @@ def test_create_or_update_skillset(self, api_key, endpoint, index_name, **kwargs assert len(client.get_skillsets()) == 1 result = client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -476,7 +475,7 @@ def test_create_or_update_skillset_inplace(self, api_key, endpoint, index_name, assert len(client.get_skillsets()) == 1 result = client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -501,8 +500,8 @@ class SearchDataSourcesClientTest(AzureMgmtTestCase): def _create_datasource(self, name="sample-datasource"): credentials = DataSourceCredentials(connection_string=CONNECTION_STRING) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=name, type="azureblob", credentials=credentials, @@ -624,8 +623,8 @@ def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_name="sa con_str = self.settings.AZURE_STORAGE_CONNECTION_STRING self.scrubber.register_name_pair(con_str, 'connection_string') credentials = DataSourceCredentials(connection_string=con_str) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=ds_name, type="azureblob", credentials=credentials, @@ -642,9 +641,9 @@ def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_name="sa "key": True, "searchable": False }] - index = Index(name=index_name, fields=fields) + index = SearchIndex(name=index_name, fields=fields) ind = client.get_indexes_client().create_index(index) - return Indexer(name=name, data_source_name=ds.name, target_index_name=ind.name) + return SearchIndexer(name=name, data_source_name=ds.name, target_index_name=ind.name) @SearchResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, 
index_batch=BATCH) From 6375e2618bd06446d4fbb4f0b9d48b1623f208d6 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Fri, 8 May 2020 16:00:10 -0700 Subject: [PATCH 18/20] regenerate w/o x-ms-pageable --- .../documents/_service/_generated/__init__.py | 10 +- .../_service/_generated/_configuration.py | 10 +- .../_generated/_search_service_client.py | 21 +- .../_service/_generated/aio/__init__.py | 4 +- .../_generated/aio/_configuration_async.py | 4 +- .../aio/_search_service_client_async.py | 15 +- .../aio/operations_async/__init__.py | 4 +- .../_data_sources_operations_async.py | 69 +- .../_indexers_operations_async.py | 98 ++- .../_indexes_operations_async.py | 190 ++--- ..._search_service_client_operations_async.py | 15 +- .../_skillsets_operations_async.py | 69 +- .../_synonym_maps_operations_async.py | 69 +- .../_service/_generated/models/__init__.py | 4 +- .../_service/_generated/models/_models.py | 737 ++++++++-------- .../_service/_generated/models/_models_py3.py | 731 ++++++++-------- .../models/_search_service_client_enums.py | 790 +++++++++--------- .../_generated/operations/__init__.py | 4 +- .../operations/_data_sources_operations.py | 79 +- .../operations/_indexers_operations.py | 108 ++- .../operations/_indexes_operations.py | 198 +++-- .../_search_service_client_operations.py | 25 +- .../operations/_skillsets_operations.py | 79 +- .../operations/_synonym_maps_operations.py | 79 +- 24 files changed, 1774 insertions(+), 1638 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py index 4e8378902f12..43602c89e078 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py @@ -1,8 +1,16 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from ._search_service_client import SearchServiceClient __all__ = ['SearchServiceClient'] + +try: + from ._patch import patch_sdk + patch_sdk() +except ImportError: + pass diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py index 28368a23abde..a398a3277c31 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py @@ -1,14 +1,20 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any +from typing import TYPE_CHECKING from azure.core.configuration import Configuration from azure.core.pipeline import policies +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + VERSION = "unknown" class SearchServiceClientConfiguration(Configuration): diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py index f466d50bb2aa..d049c0cf0dc8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py @@ -1,14 +1,20 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any +from typing import TYPE_CHECKING from azure.core import PipelineClient from msrest import Deserializer, Serializer +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + from ._configuration import SearchServiceClientConfiguration from .operations import DataSourcesOperations from .operations import IndexersOperations @@ -23,17 +29,18 @@ class SearchServiceClient(SearchServiceClientOperationsMixin): """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. :ivar data_sources: DataSourcesOperations operations - :vartype data_sources: search_service_client.operations.DataSourcesOperations + :vartype data_sources: azure.search.documents.operations.DataSourcesOperations :ivar indexers: IndexersOperations operations - :vartype indexers: search_service_client.operations.IndexersOperations + :vartype indexers: azure.search.documents.operations.IndexersOperations :ivar skillsets: SkillsetsOperations operations - :vartype skillsets: search_service_client.operations.SkillsetsOperations + :vartype skillsets: azure.search.documents.operations.SkillsetsOperations :ivar synonym_maps: SynonymMapsOperations operations - :vartype synonym_maps: search_service_client.operations.SynonymMapsOperations + :vartype synonym_maps: azure.search.documents.operations.SynonymMapsOperations :ivar indexes: IndexesOperations operations - :vartype indexes: search_service_client.operations.IndexesOperations + :vartype indexes: azure.search.documents.operations.IndexesOperations :param endpoint: The endpoint URL of the search service. :type endpoint: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
""" def __init__( diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py index 6ffdee218108..bc2235f978a0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py index 022214a557bb..d179b1eb3e0e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py index 11b0ab3a5fdf..a9b8636149fc 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -23,17 +25,18 @@ class SearchServiceClient(SearchServiceClientOperationsMixin): """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. 
:ivar data_sources: DataSourcesOperations operations - :vartype data_sources: search_service_client.aio.operations_async.DataSourcesOperations + :vartype data_sources: azure.search.documents.aio.operations_async.DataSourcesOperations :ivar indexers: IndexersOperations operations - :vartype indexers: search_service_client.aio.operations_async.IndexersOperations + :vartype indexers: azure.search.documents.aio.operations_async.IndexersOperations :ivar skillsets: SkillsetsOperations operations - :vartype skillsets: search_service_client.aio.operations_async.SkillsetsOperations + :vartype skillsets: azure.search.documents.aio.operations_async.SkillsetsOperations :ivar synonym_maps: SynonymMapsOperations operations - :vartype synonym_maps: search_service_client.aio.operations_async.SynonymMapsOperations + :vartype synonym_maps: azure.search.documents.aio.operations_async.SynonymMapsOperations :ivar indexes: IndexesOperations operations - :vartype indexes: search_service_client.aio.operations_async.IndexesOperations + :vartype indexes: azure.search.documents.aio.operations_async.IndexesOperations :param endpoint: The endpoint URL of the search service. :type endpoint: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. """ def __init__( diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py index a9e96c765498..7f552e89248c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py index bb8449e817bd..5e8069a9d406 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union @@ -22,7 +24,7 @@ class DataSourcesOperations: instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -51,7 +53,7 @@ async def create_or_update( :param data_source_name: The name of the datasource to create or update. :type data_source_name: str :param data_source: The definition of the datasource to create or update. - :type data_source: ~search_service_client.models.SearchIndexerDataSource + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. :type if_match: str @@ -59,23 +61,25 @@ async def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerDataSource or ~search_service_client.models.SearchIndexerDataSource + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -95,8 +99,8 @@ async def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -123,7 +127,7 @@ async def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore async def delete( self, @@ -144,14 
+148,15 @@ async def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -159,7 +164,7 @@ async def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -192,7 +197,7 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore async def get( self, @@ -205,14 +210,15 @@ async def get( :param data_source_name: The name of the datasource to retrieve. :type data_source_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerDataSource + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -220,7 +226,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -253,7 +259,7 @@ async def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore async def list( self, @@ -268,14 +274,15 @@ async def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListDataSourcesResult or the result of cls(response) - :rtype: ~search_service_client.models.ListDataSourcesResult + :rtype: ~azure.search.documents.models.ListDataSourcesResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListDataSourcesResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -283,7 +290,7 @@ async def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -317,7 +324,7 @@ async def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/datasources'} + list.metadata = {'url': '/datasources'} # type: ignore async def create( self, @@ -328,24 +335,26 @@ async def create( """Creates a new datasource. :param data_source: The definition of the datasource to create. - :type data_source: ~search_service_client.models.SearchIndexerDataSource + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerDataSource + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -359,8 +368,8 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -382,4 +391,4 @@ async def create( return cls(pipeline_response, deserialized, {}) 
return deserialized - create.metadata = {'url': '/datasources'} + create.metadata = {'url': '/datasources'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py index 0d9e0ff7c835..4d5b5c3466a9 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union @@ -22,7 +24,7 @@ class IndexersOperations: instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -48,14 +50,15 @@ async def reset( :param indexer_name: The name of the indexer to reset. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -63,7 +66,7 @@ async def reset( api_version = "2019-05-06-Preview" # Construct URL - url = self.reset.metadata['url'] + url = self.reset.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -92,7 +95,7 @@ async def reset( if cls: return cls(pipeline_response, None, {}) - reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} + reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} # type: ignore async def run( self, @@ -105,14 +108,15 @@ async def run( :param indexer_name: The name of the indexer to run. :type indexer_name: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -120,7 +124,7 @@ async def run( api_version = "2019-05-06-Preview" # Construct URL - url = self.run.metadata['url'] + url = self.run.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -149,7 +153,7 @@ async def run( if cls: return cls(pipeline_response, None, {}) - run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'} + run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'} # type: ignore async def create_or_update( self, @@ -165,7 +169,7 @@ async def create_or_update( :param indexer_name: The name of the indexer to create or update. :type indexer_name: str :param indexer: The definition of the indexer to create or update. - :type indexer: ~search_service_client.models.SearchIndexer + :type indexer: ~azure.search.documents.models.SearchIndexer :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. :type if_match: str @@ -173,23 +177,25 @@ async def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
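The `# type: ignore` comments added to every `metadata` assignment in this patch exist for mypy: plain functions do not declare arbitrary attributes, so assigning to `run.metadata` fails strict checking with an attr-defined error. A small self-contained illustration of the pattern (stand-in function, not SDK code):

    def run() -> None:
        """Stand-in for a generated operation method."""

    # mypy flags attribute assignment on a plain function (attr-defined),
    # hence the suppression; at runtime the assignment is perfectly legal.
    run.metadata = {'url': "/indexers('{indexerName}')/search.run"}  # type: ignore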
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexer or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexer or ~search_service_client.models.SearchIndexer + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -209,8 +215,8 @@ async def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -237,7 +243,7 @@ async def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'} + create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore async def delete( self, @@ -258,14 +264,15 @@ async def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
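The If-Match/If-None-Match headers serialized above give callers optimistic concurrency over create_or_update and delete. A hedged usage sketch: the `client` variable, the `description` field, and the `e_tag` attribute are assumptions about the surrounding SDK surface, not shown in this patch:

    async def update_if_unchanged(client, name):
        # Re-fetch, edit, and only write back if the server copy is unchanged;
        # an ETag mismatch fails the conditional request.
        indexer = await client.indexers.get(name)
        indexer.description = "refreshed description"
        return await client.indexers.create_or_update(
            name,
            indexer=indexer,
            if_match=indexer.e_tag,  # assumed ETag attribute on the model
        )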
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -273,7 +280,7 @@ async def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -306,7 +313,7 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/indexers(\'{indexerName}\')'} + delete.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore async def get( self, @@ -319,14 +326,15 @@ async def get( :param indexer_name: The name of the indexer to retrieve. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexer or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexer + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -334,7 +342,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -367,7 +375,7 @@ async def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/indexers(\'{indexerName}\')'} + get.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore async def list( self, @@ -382,14 +390,15 @@ async def list( properties. :type select: str :param request_options: Parameter group. 
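Since the `select` parameter documented here is forwarded verbatim as the OData `$select` query option, trimming the response is just a comma-separated property list. A sketch under the assumption that the result object exposes the listed indexers on an `indexers` attribute:

    async def indexer_names(client):
        # Only the 'name' property of each indexer comes back from the service.
        result = await client.indexers.list(select="name")
        return [indexer.name for indexer in result.indexers]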
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListIndexersResult or the result of cls(response) - :rtype: ~search_service_client.models.ListIndexersResult + :rtype: ~azure.search.documents.models.ListIndexersResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexersResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -397,7 +406,7 @@ async def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -431,7 +440,7 @@ async def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/indexers'} + list.metadata = {'url': '/indexers'} # type: ignore async def create( self, @@ -442,24 +451,26 @@ async def create( """Creates a new indexer. :param indexer: The definition of the indexer to create. - :type indexer: ~search_service_client.models.SearchIndexer + :type indexer: ~azure.search.documents.models.SearchIndexer :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexer or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexer + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -473,8 +484,8 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -496,7 +507,7 @@ async def create( return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/indexers'} + create.metadata = {'url': 
'/indexers'} # type: ignore async def get_status( self, @@ -509,14 +520,15 @@ async def get_status( :param indexer_name: The name of the indexer for which to retrieve status. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerStatus or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerStatus + :rtype: ~azure.search.documents.models.SearchIndexerStatus :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerStatus"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -524,7 +536,7 @@ async def get_status( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_status.metadata['url'] + url = self.get_status.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -557,4 +569,4 @@ async def get_status( return cls(pipeline_response, deserialized, {}) return deserialized - get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} + get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py index 6993461cbe18..ce6b938f8148 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py @@ -1,12 +1,13 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings -from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest @@ -23,7 +24,7 @@ class IndexesOperations: instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. 
- :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -47,24 +48,26 @@ async def create( """Creates a new search index. :param index: The definition of the index to create. - :type index: ~search_service_client.models.SearchIndex + :type index: ~azure.search.documents.models.SearchIndex :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndex or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndex + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -78,8 +81,8 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -101,9 +104,9 @@ async def create( return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/indexes'} + create.metadata = {'url': '/indexes'} # type: ignore - def list( + async def list( self, select: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, @@ -112,79 +115,61 @@ def list( """Lists all indexes available for a search service. :param select: Selects which top-level properties of the index definitions to retrieve. - Specified as a comma-separated list of JSON property names, or '*' for all properties. The - default is all properties. + Specified as a comma-separated list of JSON property names, or '*' for all properties. The + default is all properties. :type select: str :param request_options: Parameter group. 
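With `def list` becoming `async def list` here, and the AsyncItemPaged/AsyncList plumbing stripped out just below, listing indexes now returns a single awaited ListIndexesResult instead of an async pager: callers iterate an attribute of the result rather than `async for` over the call. A sketch of the changed calling convention, assuming the result exposes an `indexes` attribute:

    async def show_index_names(client):
        # Before this patch: async for index in client.indexes.list(): ...
        # Now the coroutine resolves to one result object instead.
        result = await client.indexes.list(select="name")
        for index in result.indexes:
            print(index.name)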
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListIndexesResult or the result of cls(response) - :rtype: ~search_service_client.models.ListIndexesResult + :rtype: ~azure.search.documents.models.ListIndexesResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexesResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - else: - url = next_link - path_format_arguments = { - 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _x_ms_client_request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - header_parameters['Accept'] = 'application/json' - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - async def extract_data(pipeline_response): - deserialized = self._deserialize('ListIndexesResult', pipeline_response) - list_of_elem = deserialized.value - if cls: - list_of_elem = cls(list_of_elem) - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize(models.SearchError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map, model=error) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged( - get_next, extract_data - ) - list.metadata = {'url': '/indexes'} + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = 
{} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ListIndexesResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/indexes'} # type: ignore async def create_or_update( self, @@ -201,7 +186,7 @@ async def create_or_update( :param index_name: The definition of the index to create or update. :type index_name: str :param index: The definition of the index to create or update. - :type index: ~search_service_client.models.SearchIndex + :type index: ~azure.search.documents.models.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of @@ -215,23 +200,25 @@ async def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndex or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndex or ~search_service_client.models.SearchIndex + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -253,8 +240,8 @@ async def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 
'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -281,7 +268,7 @@ async def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} + create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore async def delete( self, @@ -302,14 +289,15 @@ async def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -317,7 +305,7 @@ async def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -350,7 +338,7 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/indexes(\'{indexName}\')'} + delete.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore async def get( self, @@ -363,14 +351,15 @@ async def get( :param index_name: The name of the index to retrieve. :type index_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndex or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndex + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -378,7 +367,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -411,7 +400,7 @@ async def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/indexes(\'{indexName}\')'} + get.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore async def get_statistics( self, @@ -424,14 +413,15 @@ async def get_statistics( :param index_name: The name of the index for which to retrieve statistics. 
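Alongside moving Content-Type through `self._serialize.header`, these hunks hoist `content_type = kwargs.pop(...)` to the top of each body-taking operation, so the value is consumed before **kwargs is forwarded into the pipeline run. Overriding it remains a plain keyword argument; a hedged sketch with an assumed client variable:

    async def create_with_custom_content_type(client, index):
        # The popped value is serialized into the Content-Type header rather
        # than leaking into the kwargs forwarded to the pipeline.
        return await client.indexes.create(
            index,
            content_type="application/json; charset=utf-8",
        )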
:type index_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: GetIndexStatisticsResult or the result of cls(response) - :rtype: ~search_service_client.models.GetIndexStatisticsResult + :rtype: ~azure.search.documents.models.GetIndexStatisticsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.GetIndexStatisticsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -439,7 +429,7 @@ async def get_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_statistics.metadata['url'] + url = self.get_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -472,7 +462,7 @@ async def get_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} + get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} # type: ignore async def analyze( self, @@ -486,24 +476,26 @@ async def analyze( :param index_name: The name of the index for which to test an analyzer. :type index_name: str :param request: The text and analyzer or analysis components to test. - :type request: ~search_service_client.models.AnalyzeRequest + :type request: ~azure.search.documents.models.AnalyzeRequest :param request_options: Parameter group. 
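The AnalyzeRequest parameter documented here pairs input text with either an analyzer name or a tokenizer; the model docstring near the end of this patch spells out that the two are mutually exclusive. A hedged sketch of exercising it: the import path follows this patch's generated layout, and the token attributes on the result are assumptions:

    from azure.search.documents._service._generated.models import AnalyzeRequest

    async def try_standard_analyzer(client, index_name):
        # Ask the service how the standard Lucene analyzer tokenizes the text.
        request = AnalyzeRequest(text="Quick brown fox", analyzer="standard.lucene")
        result = await client.indexes.analyze(index_name, request=request)
        return [token.token for token in result.tokens]  # assumed result shape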
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: AnalyzeResult or the result of cls(response) - :rtype: ~search_service_client.models.AnalyzeResult + :rtype: ~azure.search.documents.models.AnalyzeResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.analyze.metadata['url'] + url = self.analyze.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -518,8 +510,8 @@ async def analyze( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -541,4 +533,4 @@ async def analyze( return cls(pipeline_response, deserialized, {}) return deserialized - analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} + analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py index 1a1707edf14b..473c7c33fa3b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar @@ -25,14 +27,15 @@ async def get_service_statistics( """Gets service level statistics for a search service. :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ServiceStatistics or the result of cls(response) - :rtype: ~search_service_client.models.ServiceStatistics + :rtype: ~azure.search.documents.models.ServiceStatistics :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceStatistics"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -40,7 +43,7 @@ async def get_service_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_service_statistics.metadata['url'] + url = self.get_service_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -72,4 +75,4 @@ async def get_service_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_service_statistics.metadata = {'url': '/servicestats'} + get_service_statistics.metadata = {'url': '/servicestats'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py index 168e0d46ec59..693760d1612b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union @@ -22,7 +24,7 @@ class SkillsetsOperations: instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -52,7 +54,7 @@ async def create_or_update( :type skillset_name: str :param skillset: The skillset containing one or more skills to create or update in a search service. - :type skillset: ~search_service_client.models.SearchIndexerSkillset + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. 
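For the get_service_statistics operation in the new _search_service_client_operations_async module above, usage is a single awaited call on the client itself rather than an operation group. A sketch; the attribute names on the returned model are assumptions, since this hunk does not show the ServiceStatistics definition:

    async def service_usage(client):
        # One awaited call returns a single ServiceStatistics model.
        stats = await client.get_service_statistics()
        return stats.counters, stats.limits  # assumed model attributes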
:type if_match: str @@ -60,23 +62,25 @@ async def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerSkillset or ~search_service_client.models.SearchIndexerSkillset + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -96,8 +100,8 @@ async def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -124,7 +128,7 @@ async def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore async def delete( self, @@ -145,14 +149,15 @@ async def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -160,7 +165,7 @@ async def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -193,7 +198,7 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore async def get( self, @@ -206,14 +211,15 @@ async def get( :param skillset_name: The name of the skillset to retrieve. :type skillset_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerSkillset + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -221,7 +227,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -254,7 +260,7 @@ async def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + get.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore async def list( self, @@ -269,14 +275,15 @@ async def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListSkillsetsResult or the result of cls(response) - :rtype: ~search_service_client.models.ListSkillsetsResult + :rtype: ~azure.search.documents.models.ListSkillsetsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListSkillsetsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -284,7 +291,7 @@ async def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -318,7 +325,7 @@ async def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/skillsets'} + list.metadata = {'url': '/skillsets'} # type: ignore async def create( self, @@ -329,24 +336,26 @@ async def create( """Creates a new skillset in a search service. :param skillset: The skillset containing one or more skills to create in a search service. - :type skillset: ~search_service_client.models.SearchIndexerSkillset + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerSkillset + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -360,8 +369,8 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -383,4 +392,4 @@ async def create( return cls(pipeline_response, 
deserialized, {}) return deserialized - create.metadata = {'url': '/skillsets'} + create.metadata = {'url': '/skillsets'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py index 16294474e20e..1df4d8001380 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union @@ -22,7 +24,7 @@ class SynonymMapsOperations: instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -51,7 +53,7 @@ async def create_or_update( :param synonym_map_name: The name of the synonym map to create or update. :type synonym_map_name: str :param synonym_map: The definition of the synonym map to create or update. - :type synonym_map: ~search_service_client.models.SynonymMap + :type synonym_map: ~azure.search.documents.models.SynonymMap :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. :type if_match: str @@ -59,23 +61,25 @@ async def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap or ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -95,8 +99,8 @@ async def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -123,7 +127,7 @@ async def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore async def delete( self, @@ -144,14 +148,15 @@ async def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -159,7 +164,7 @@ async def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -192,7 +197,7 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore async def get( self, @@ -205,14 +210,15 @@ async def get( :param synonym_map_name: The name of the synonym map to retrieve. :type synonym_map_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -220,7 +226,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -253,7 +259,7 @@ async def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore async def list( self, @@ -268,14 +274,15 @@ async def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListSynonymMapsResult or the result of cls(response) - :rtype: ~search_service_client.models.ListSynonymMapsResult + :rtype: ~azure.search.documents.models.ListSynonymMapsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListSynonymMapsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -283,7 +290,7 @@ async def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -317,7 +324,7 @@ async def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/synonymmaps'} + list.metadata = {'url': '/synonymmaps'} # type: ignore async def create( self, @@ -328,24 +335,26 @@ async def create( """Creates a new synonym map. :param synonym_map: The definition of the synonym map to create. - :type synonym_map: ~search_service_client.models.SynonymMap + :type synonym_map: ~azure.search.documents.models.SynonymMap :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -359,8 +368,8 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -382,4 +391,4 @@ async def create( return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/synonymmaps'} + 
create.metadata = {'url': '/synonymmaps'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py index 50af0a4a97e6..cc66379bb7a5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py index bdb6ee05665b..23293adcf2b6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -62,33 +64,33 @@ class AnalyzeRequest(msrest.serialization.Model): :type text: str :param analyzer: The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are - mutually exclusive. 
Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName + mutually exclusive. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param tokenizer: The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters - are mutually exclusive. 
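# --- illustrative sketch, not part of the regenerated patch ---
# AnalyzeRequest takes either a prebuilt analyzer name from the list above or a
# tokenizer, never both. The import path follows the generated layout in this
# patch; the public package may re-export these models elsewhere.
from azure.search.documents._service._generated.models import AnalyzeRequest

request = AnalyzeRequest(
    text="The quick brown fox",
    analyzer="en.lucene",  # one of the documented LexicalAnalyzerName values
)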
Possible values include: 'classic', 'edgeNGram', 'keyword_v2', - 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', - 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName + are mutually exclusive. Possible values include: "classic", "edgeNGram", "keyword_v2", + "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", + "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". + :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName :param token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName] :param char_filters: An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. :type char_filters: list[str] @@ -111,7 +113,7 @@ def __init__( **kwargs ): super(AnalyzeRequest, self).__init__(**kwargs) - self.text = kwargs.get('text', None) + self.text = kwargs['text'] self.analyzer = kwargs.get('analyzer', None) self.tokenizer = kwargs.get('tokenizer', None) self.token_filters = kwargs.get('token_filters', None) @@ -124,7 +126,7 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param tokens: Required. The list of tokens returned by the analyzer specified in the request. - :type tokens: list[~search_service_client.models.AnalyzedTokenInfo] + :type tokens: list[~azure.search.documents.models.AnalyzedTokenInfo] """ _validation = { @@ -140,7 +142,7 @@ def __init__( **kwargs ): super(AnalyzeResult, self).__init__(**kwargs) - self.tokens = kwargs.get('tokens', None) + self.tokens = kwargs['tokens'] class TokenFilter(msrest.serialization.Model): @@ -180,7 +182,7 @@ def __init__( ): super(TokenFilter, self).__init__(**kwargs) self.odata_type = None - self.name = kwargs.get('name', None) + self.name = kwargs['name'] class AsciiFoldingTokenFilter(TokenFilter): @@ -247,7 +249,7 @@ def __init__( **kwargs ): super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs) - self.application_id = kwargs.get('application_id', None) + self.application_id = kwargs['application_id'] self.application_secret = kwargs.get('application_secret', None) @@ -357,7 +359,7 @@ def __init__( ): super(CharFilter, self).__init__(**kwargs) self.odata_type = None - self.name = kwargs.get('name', None) + self.name = kwargs['name'] class CjkBigramTokenFilter(TokenFilter): @@ -373,7 +375,7 @@ class CjkBigramTokenFilter(TokenFilter): limited to 128 characters. :type name: str :param ignore_scripts: The scripts to ignore. - :type ignore_scripts: list[str or ~search_service_client.models.CjkBigramTokenFilterScripts] + :type ignore_scripts: list[str or ~azure.search.documents.models.CjkBigramTokenFilterScripts] :param output_unigrams: A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. 
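# --- illustrative sketch, not part of the regenerated patch ---
# The kwargs.get('text', None) -> kwargs['text'] changes above tighten required
# parameters: omitting one now raises KeyError at construction time instead of
# silently producing a model with a None field. A pure-Python demonstration:
class DemoModel:
    def __init__(self, **kwargs):
        self.text = kwargs['text']              # required: KeyError if missing
        self.analyzer = kwargs.get('analyzer')  # optional: defaults to None

DemoModel(text="ok")  # fine
try:
    DemoModel()       # fails loudly
except KeyError as exc:
    print("missing required field:", exc)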
:type output_unigrams: bool @@ -463,7 +465,7 @@ def __init__( ): super(LexicalTokenizer, self).__init__(**kwargs) self.odata_type = None - self.name = kwargs.get('name', None) + self.name = kwargs['name'] class ClassicTokenizer(LexicalTokenizer): @@ -573,7 +575,7 @@ def __init__( ): super(CognitiveServicesAccountKey, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' - self.key = kwargs.get('key', None) + self.key = kwargs['key'] class CommonGramTokenFilter(TokenFilter): @@ -619,7 +621,7 @@ def __init__( ): super(CommonGramTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' - self.common_words = kwargs.get('common_words', None) + self.common_words = kwargs['common_words'] self.ignore_case = kwargs.get('ignore_case', False) self.use_query_mode = kwargs.get('use_query_mode', False) @@ -647,10 +649,10 @@ class SearchIndexerSkill(msrest.serialization.Model): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -681,8 +683,8 @@ def __init__( self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) self.context = kwargs.get('context', None) - self.inputs = kwargs.get('inputs', None) - self.outputs = kwargs.get('outputs', None) + self.inputs = kwargs['inputs'] + self.outputs = kwargs['outputs'] class ConditionalSkill(SearchIndexerSkill): @@ -705,10 +707,10 @@ class ConditionalSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -762,7 +764,7 @@ def __init__( **kwargs ): super(CorsOptions, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) + self.allowed_origins = kwargs['allowed_origins'] self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) @@ -803,7 +805,7 @@ def __init__( ): super(LexicalAnalyzer, self).__init__(**kwargs) self.odata_type = None - self.name = kwargs.get('name', None) + self.name = kwargs['name'] class CustomAnalyzer(LexicalAnalyzer): @@ -819,15 +821,15 @@ class CustomAnalyzer(LexicalAnalyzer): 128 characters. :type name: str :param tokenizer: Required. The name of the tokenizer to use to divide continuous text into a - sequence of tokens, such as breaking a sentence into words. 
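# --- illustrative sketch, not part of the regenerated patch ---
# CorsOptions now requires allowed_origins (kwargs['allowed_origins'] above);
# max_age_in_seconds stays optional. Import path as generated in this patch.
from azure.search.documents._service._generated.models import CorsOptions

cors = CorsOptions(
    allowed_origins=["https://example.org"],  # hypothetical origin
    max_age_in_seconds=300,
)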
Possible values include: 'classic', - 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', - 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', - 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName + sequence of tokens, such as breaking a sentence into words. Possible values include: "classic", + "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", + "standard_v2", "uax_url_email", "whitespace". + :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName :param token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName] :param char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. @@ -854,7 +856,7 @@ def __init__( ): super(CustomAnalyzer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' - self.tokenizer = kwargs.get('tokenizer', None) + self.tokenizer = kwargs['tokenizer'] self.token_filters = kwargs.get('token_filters', None) self.char_filters = kwargs.get('char_filters', None) @@ -1026,7 +1028,7 @@ def __init__( ): super(DictionaryDecompounderTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' - self.word_list = kwargs.get('word_list', None) + self.word_list = kwargs['word_list'] self.min_word_size = kwargs.get('min_word_size', 5) self.min_subword_size = kwargs.get('min_subword_size', 2) self.max_subword_size = kwargs.get('max_subword_size', 15) @@ -1050,9 +1052,9 @@ class ScoringFunction(msrest.serialization.Model): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation """ _validation = { @@ -1078,8 +1080,8 @@ def __init__( ): super(ScoringFunction, self).__init__(**kwargs) self.type = None - self.field_name = kwargs.get('field_name', None) - self.boost = kwargs.get('boost', None) + self.field_name = kwargs['field_name'] + self.boost = kwargs['boost'] self.interpolation = kwargs.get('interpolation', None) @@ -1097,11 +1099,11 @@ class DistanceScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". 
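# --- illustrative sketch, not part of the regenerated patch ---
# CustomAnalyzer: the tokenizer is now a required kwarg (kwargs['tokenizer']),
# while token_filters and char_filters stay optional. Import path as generated.
from azure.search.documents._service._generated.models import CustomAnalyzer

analyzer = CustomAnalyzer(
    name="my_analyzer",           # hypothetical analyzer name
    tokenizer="standard_v2",      # required LexicalTokenizerName value
    token_filters=["lowercase"],  # filters run in the order listed
)
# __init__ pins odata_type to '#Microsoft.Azure.Search.CustomAnalyzer'.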
Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the distance scoring function. - :type parameters: ~search_service_client.models.DistanceScoringParameters + :type parameters: ~azure.search.documents.models.DistanceScoringParameters """ _validation = { @@ -1125,7 +1127,7 @@ def __init__( ): super(DistanceScoringFunction, self).__init__(**kwargs) self.type = 'distance' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class DistanceScoringParameters(msrest.serialization.Model): @@ -1156,8 +1158,8 @@ def __init__( **kwargs ): super(DistanceScoringParameters, self).__init__(**kwargs) - self.reference_point_parameter = kwargs.get('reference_point_parameter', None) - self.boosting_distance = kwargs.get('boosting_distance', None) + self.reference_point_parameter = kwargs['reference_point_parameter'] + self.boosting_distance = kwargs['boosting_distance'] class EdgeNGramTokenFilter(TokenFilter): @@ -1178,8 +1180,8 @@ class EdgeNGramTokenFilter(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1224,8 +1226,8 @@ class EdgeNGramTokenFilterV2(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1272,7 +1274,7 @@ class EdgeNGramTokenizer(LexicalTokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. - :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -1357,17 +1359,17 @@ class EntityRecognitionSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param categories: A list of entity categories that should be extracted. - :type categories: list[str or ~search_service_client.models.EntityCategory] + :type categories: list[str or ~azure.search.documents.models.EntityCategory] :param default_language_code: A value indicating which language code to use. 
Default is en. - Possible values include: 'ar', 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de', - 'el', 'hu', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr'. + Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", + "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". :type default_language_code: str or - ~search_service_client.models.EntityRecognitionSkillLanguage + ~azure.search.documents.models.EntityRecognitionSkillLanguage :param include_typeless_entities: Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not @@ -1421,7 +1423,7 @@ class FieldMapping(msrest.serialization.Model): name by default. :type target_field_name: str :param mapping_function: A function to apply to each source field value before indexing. - :type mapping_function: ~search_service_client.models.FieldMappingFunction + :type mapping_function: ~azure.search.documents.models.FieldMappingFunction """ _validation = { @@ -1439,7 +1441,7 @@ def __init__( **kwargs ): super(FieldMapping, self).__init__(**kwargs) - self.source_field_name = kwargs.get('source_field_name', None) + self.source_field_name = kwargs['source_field_name'] self.target_field_name = kwargs.get('target_field_name', None) self.mapping_function = kwargs.get('mapping_function', None) @@ -1470,7 +1472,7 @@ def __init__( **kwargs ): super(FieldMappingFunction, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.parameters = kwargs.get('parameters', None) @@ -1488,11 +1490,11 @@ class FreshnessScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the freshness scoring function. 
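# --- illustrative sketch, not part of the regenerated patch ---
# FieldMapping now requires source_field_name; target_field_name and
# mapping_function remain optional. The 'base64Encode' function name is an
# assumption from the service's documented mapping functions, not this patch.
from azure.search.documents._service._generated.models import (
    FieldMapping,
    FieldMappingFunction,
)

mapping = FieldMapping(
    source_field_name="Id",
    target_field_name="HotelId",
    mapping_function=FieldMappingFunction(name="base64Encode"),
)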
- :type parameters: ~search_service_client.models.FreshnessScoringParameters + :type parameters: ~azure.search.documents.models.FreshnessScoringParameters """ _validation = { @@ -1516,7 +1518,7 @@ def __init__( ): super(FreshnessScoringFunction, self).__init__(**kwargs) self.type = 'freshness' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class FreshnessScoringParameters(msrest.serialization.Model): @@ -1542,7 +1544,7 @@ def __init__( **kwargs ): super(FreshnessScoringParameters, self).__init__(**kwargs) - self.boosting_duration = kwargs.get('boosting_duration', None) + self.boosting_duration = kwargs['boosting_duration'] class GetIndexStatisticsResult(msrest.serialization.Model): @@ -1605,7 +1607,7 @@ def __init__( ): super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' - self.high_water_mark_column_name = kwargs.get('high_water_mark_column_name', None) + self.high_water_mark_column_name = kwargs['high_water_mark_column_name'] class ImageAnalysisSkill(SearchIndexerSkill): @@ -1628,17 +1630,17 @@ class ImageAnalysisSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'en', 'es', 'ja', 'pt', 'zh'. - :type default_language_code: str or ~search_service_client.models.ImageAnalysisSkillLanguage + Possible values include: "en", "es", "ja", "pt", "zh". + :type default_language_code: str or ~azure.search.documents.models.ImageAnalysisSkillLanguage :param visual_features: A list of visual features. - :type visual_features: list[str or ~search_service_client.models.VisualFeature] + :type visual_features: list[str or ~azure.search.documents.models.VisualFeature] :param details: A string indicating which domain-specific details to return. - :type details: list[str or ~search_service_client.models.ImageDetail] + :type details: list[str or ~azure.search.documents.models.ImageDetail] """ _validation = { @@ -1678,8 +1680,8 @@ class IndexerExecutionResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar status: Required. The outcome of this indexer execution. Possible values include: - 'transientFailure', 'success', 'inProgress', 'reset'. - :vartype status: str or ~search_service_client.models.IndexerExecutionStatus + "transientFailure", "success", "inProgress", "reset". + :vartype status: str or ~azure.search.documents.models.IndexerExecutionStatus :ivar error_message: The error message indicating the top-level error, if any. :vartype error_message: str :ivar start_time: The start time of this indexer execution. @@ -1687,9 +1689,9 @@ class IndexerExecutionResult(msrest.serialization.Model): :ivar end_time: The end time of this indexer execution, if the execution has already completed. 
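# --- illustrative sketch, not part of the regenerated patch ---
# FreshnessScoringFunction requires field_name, boost and parameters, and
# FreshnessScoringParameters requires boosting_duration. msrest serializes a
# timedelta to the ISO 8601 duration the service expects (e.g. "P7D").
import datetime
from azure.search.documents._service._generated.models import (
    FreshnessScoringFunction,
    FreshnessScoringParameters,
)

freshness = FreshnessScoringFunction(
    field_name="lastUpdated",  # hypothetical date field
    boost=2.0,
    parameters=FreshnessScoringParameters(
        boosting_duration=datetime.timedelta(days=7),
    ),
)
# __init__ pins self.type to 'freshness' for the polymorphic discriminator.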
:vartype end_time: ~datetime.datetime :ivar errors: Required. The item-level indexing errors. - :vartype errors: list[~search_service_client.models.SearchIndexerError] + :vartype errors: list[~azure.search.documents.models.SearchIndexerError] :ivar warnings: Required. The item-level indexing warnings. - :vartype warnings: list[~search_service_client.models.SearchIndexerWarning] + :vartype warnings: list[~azure.search.documents.models.SearchIndexerWarning] :ivar item_count: Required. The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. @@ -1806,7 +1808,7 @@ def __init__( **kwargs ): super(IndexingSchedule, self).__init__(**kwargs) - self.interval = kwargs.get('interval', None) + self.interval = kwargs['interval'] self.start_time = kwargs.get('start_time', None) @@ -1822,7 +1824,7 @@ class InputFieldMappingEntry(msrest.serialization.Model): :param source_context: The source context used for selecting recursive inputs. :type source_context: str :param inputs: The recursive inputs used when creating a complex type. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] """ _validation = { @@ -1841,7 +1843,7 @@ def __init__( **kwargs ): super(InputFieldMappingEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.source = kwargs.get('source', None) self.source_context = kwargs.get('source_context', None) self.inputs = kwargs.get('inputs', None) @@ -1885,7 +1887,7 @@ def __init__( ): super(KeepTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' - self.keep_words = kwargs.get('keep_words', None) + self.keep_words = kwargs['keep_words'] self.lower_case_keep_words = kwargs.get('lower_case_keep_words', False) @@ -1909,15 +1911,15 @@ class KeyPhraseExtractionSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl', 'pt- - PT', 'pt-BR', 'ru', 'es', 'sv'. + Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt- + PT", "pt-BR", "ru", "es", "sv". :type default_language_code: str or - ~search_service_client.models.KeyPhraseExtractionSkillLanguage + ~azure.search.documents.models.KeyPhraseExtractionSkillLanguage :param max_key_phrase_count: A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. 
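# --- illustrative sketch, not part of the regenerated patch ---
# Skills require both inputs and outputs (kwargs['inputs'] / kwargs['outputs']
# above). The '/document/content' source path is the service's usual
# enrichment-tree root for extracted text, an assumption beyond this patch.
from azure.search.documents._service._generated.models import (
    InputFieldMappingEntry,
    KeyPhraseExtractionSkill,
    OutputFieldMappingEntry,
)

skill = KeyPhraseExtractionSkill(
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="keyPhrases", target_name="keyphrases")],
    default_language_code="en",
)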
:type max_key_phrase_count: int @@ -1988,7 +1990,7 @@ def __init__( ): super(KeywordMarkerTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' - self.keywords = kwargs.get('keywords', None) + self.keywords = kwargs['keywords'] self.ignore_case = kwargs.get('ignore_case', False) @@ -2086,10 +2088,10 @@ class LanguageDetectionSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -2207,7 +2209,7 @@ class ListDataSourcesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar data_sources: Required. The datasources in the Search service. - :vartype data_sources: list[~search_service_client.models.SearchIndexerDataSource] + :vartype data_sources: list[~azure.search.documents.models.SearchIndexerDataSource] """ _validation = { @@ -2234,7 +2236,7 @@ class ListIndexersResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexers: Required. The indexers in the Search service. - :vartype indexers: list[~search_service_client.models.SearchIndexer] + :vartype indexers: list[~azure.search.documents.models.SearchIndexer] """ _validation = { @@ -2261,7 +2263,7 @@ class ListIndexesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexes: Required. The indexes in the Search service. - :vartype indexes: list[~search_service_client.models.SearchIndex] + :vartype indexes: list[~azure.search.documents.models.SearchIndex] """ _validation = { @@ -2288,7 +2290,7 @@ class ListSkillsetsResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar skillsets: Required. The skillsets defined in the Search service. - :vartype skillsets: list[~search_service_client.models.SearchIndexerSkillset] + :vartype skillsets: list[~azure.search.documents.models.SearchIndexerSkillset] """ _validation = { @@ -2315,7 +2317,7 @@ class ListSynonymMapsResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar synonym_maps: Required. The synonym maps in the Search service. - :vartype synonym_maps: list[~search_service_client.models.SynonymMap] + :vartype synonym_maps: list[~azure.search.documents.models.SynonymMap] """ _validation = { @@ -2465,11 +2467,11 @@ class MagnitudeScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". 
+ :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the magnitude scoring function. - :type parameters: ~search_service_client.models.MagnitudeScoringParameters + :type parameters: ~azure.search.documents.models.MagnitudeScoringParameters """ _validation = { @@ -2493,7 +2495,7 @@ def __init__( ): super(MagnitudeScoringFunction, self).__init__(**kwargs) self.type = 'magnitude' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class MagnitudeScoringParameters(msrest.serialization.Model): @@ -2526,8 +2528,8 @@ def __init__( **kwargs ): super(MagnitudeScoringParameters, self).__init__(**kwargs) - self.boosting_range_start = kwargs.get('boosting_range_start', None) - self.boosting_range_end = kwargs.get('boosting_range_end', None) + self.boosting_range_start = kwargs['boosting_range_start'] + self.boosting_range_end = kwargs['boosting_range_end'] self.should_boost_beyond_range_by_constant = kwargs.get('should_boost_beyond_range_by_constant', None) @@ -2566,7 +2568,7 @@ def __init__( ): super(MappingCharFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' - self.mappings = kwargs.get('mappings', None) + self.mappings = kwargs['mappings'] class MergeSkill(SearchIndexerSkill): @@ -2589,10 +2591,10 @@ class MergeSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an empty space. :type insert_pre_tag: str @@ -2649,13 +2651,13 @@ class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. Possible values include: - 'arabic', 'bangla', 'bulgarian', 'catalan', 'croatian', 'czech', 'danish', 'dutch', 'english', - 'estonian', 'finnish', 'french', 'german', 'greek', 'gujarati', 'hebrew', 'hindi', 'hungarian', - 'icelandic', 'indonesian', 'italian', 'kannada', 'latvian', 'lithuanian', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovak', 'slovenian', 'spanish', - 'swedish', 'tamil', 'telugu', 'turkish', 'ukrainian', 'urdu'. 
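# --- illustrative sketch, not part of the regenerated patch ---
# MagnitudeScoringParameters now requires both ends of the boosting range
# (kwargs['boosting_range_start'] / kwargs['boosting_range_end'] above).
from azure.search.documents._service._generated.models import (
    MagnitudeScoringFunction,
    MagnitudeScoringParameters,
)

magnitude = MagnitudeScoringFunction(
    field_name="rating",  # hypothetical numeric field
    boost=1.5,
    interpolation="quadratic",  # one of the documented interpolation values
    parameters=MagnitudeScoringParameters(
        boosting_range_start=0,
        boosting_range_end=5,
        should_boost_beyond_range_by_constant=True,
    ),
)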
- :type language: str or ~search_service_client.models.MicrosoftStemmingTokenizerLanguage + "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", + "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", + "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". + :type language: str or ~azure.search.documents.models.MicrosoftStemmingTokenizerLanguage """ _validation = { @@ -2704,13 +2706,13 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. Possible values include: - 'bangla', 'bulgarian', 'catalan', 'chineseSimplified', 'chineseTraditional', 'croatian', - 'czech', 'danish', 'dutch', 'english', 'french', 'german', 'greek', 'gujarati', 'hindi', - 'icelandic', 'indonesian', 'italian', 'japanese', 'kannada', 'korean', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovenian', 'spanish', 'swedish', - 'tamil', 'telugu', 'thai', 'ukrainian', 'urdu', 'vietnamese'. - :type language: str or ~search_service_client.models.MicrosoftTokenizerLanguage + "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", + "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", + "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", + "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :type language: str or ~azure.search.documents.models.MicrosoftTokenizerLanguage """ _validation = { @@ -2840,7 +2842,7 @@ class NGramTokenizer(LexicalTokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. - :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -2889,18 +2891,18 @@ class OcrSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param text_extraction_algorithm: A value indicating which algorithm to use for extracting - text. Default is printed. Possible values include: 'printed', 'handwritten'. 
- :type text_extraction_algorithm: str or ~search_service_client.models.TextExtractionAlgorithm + text. Default is printed. Possible values include: "printed", "handwritten". + :type text_extraction_algorithm: str or ~azure.search.documents.models.TextExtractionAlgorithm :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', - 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr- - Latn', 'sk'. - :type default_language_code: str or ~search_service_client.models.OcrSkillLanguage + Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", "sr- + Latn", "sk". + :type default_language_code: str or ~azure.search.documents.models.OcrSkillLanguage :param should_detect_orientation: A value indicating to turn orientation detection on or not. Default is false. :type should_detect_orientation: bool @@ -2960,7 +2962,7 @@ def __init__( **kwargs ): super(OutputFieldMappingEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.target_name = kwargs.get('target_name', None) @@ -3036,9 +3038,9 @@ class PatternAnalyzer(LexicalAnalyzer): :param pattern: A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. :type pattern: str - :param flags: Regular expression flags. Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags + :param flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :type flags: str or ~azure.search.documents.models.RegexFlags :param stopwords: A list of stopwords. :type stopwords: list[str] """ @@ -3107,7 +3109,7 @@ def __init__( ): super(PatternCaptureTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' - self.patterns = kwargs.get('patterns', None) + self.patterns = kwargs['patterns'] self.preserve_original = kwargs.get('preserve_original', True) @@ -3149,8 +3151,8 @@ def __init__( ): super(PatternReplaceCharFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' - self.pattern = kwargs.get('pattern', None) - self.replacement = kwargs.get('replacement', None) + self.pattern = kwargs['pattern'] + self.replacement = kwargs['replacement'] class PatternReplaceTokenFilter(TokenFilter): @@ -3191,8 +3193,8 @@ def __init__( ): super(PatternReplaceTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' - self.pattern = kwargs.get('pattern', None) - self.replacement = kwargs.get('replacement', None) + self.pattern = kwargs['pattern'] + self.replacement = kwargs['replacement'] class PatternTokenizer(LexicalTokenizer): @@ -3210,9 +3212,9 @@ class PatternTokenizer(LexicalTokenizer): :param pattern: A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. :type pattern: str - :param flags: Regular expression flags. 
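# --- illustrative sketch, not part of the regenerated patch ---
# OcrSkill keeps the shared required inputs/outputs contract and adds the
# optional knobs documented above. The normalized-images source path is the
# service's usual convention, an assumption beyond this patch.
from azure.search.documents._service._generated.models import (
    InputFieldMappingEntry,
    OcrSkill,
    OutputFieldMappingEntry,
)

ocr = OcrSkill(
    inputs=[InputFieldMappingEntry(name="image",
                                   source="/document/normalized_images/*")],
    outputs=[OutputFieldMappingEntry(name="text")],
    text_extraction_algorithm="printed",  # or "handwritten"
    default_language_code="en",
    should_detect_orientation=True,
)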
Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags + :param flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :type flags: str or ~azure.search.documents.models.RegexFlags :param group: The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. @@ -3256,9 +3258,9 @@ class PhoneticTokenFilter(TokenFilter): limited to 128 characters. :type name: str :param encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: - 'metaphone', 'doubleMetaphone', 'soundex', 'refinedSoundex', 'caverphone1', 'caverphone2', - 'cologne', 'nysiis', 'koelnerPhonetik', 'haasePhonetik', 'beiderMorse'. - :type encoder: str or ~search_service_client.models.PhoneticEncoder + "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", + "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse". + :type encoder: str or ~azure.search.documents.models.PhoneticEncoder :param replace_original_tokens: A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. :type replace_original_tokens: bool @@ -3330,7 +3332,7 @@ def __init__( **kwargs ): super(ResourceCounter, self).__init__(**kwargs) - self.usage = kwargs.get('usage', None) + self.usage = kwargs['usage'] self.quota = kwargs.get('quota', None) @@ -3343,13 +3345,13 @@ class ScoringProfile(msrest.serialization.Model): :type name: str :param text_weights: Parameters that boost scoring based on text matches in certain index fields. - :type text_weights: ~search_service_client.models.TextWeights + :type text_weights: ~azure.search.documents.models.TextWeights :param functions: The collection of functions that influence the scoring of documents. - :type functions: list[~search_service_client.models.ScoringFunction] + :type functions: list[~azure.search.documents.models.ScoringFunction] :param function_aggregation: A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible - values include: 'sum', 'average', 'minimum', 'maximum', 'firstMatching'. - :type function_aggregation: str or ~search_service_client.models.ScoringFunctionAggregation + values include: "sum", "average", "minimum", "maximum", "firstMatching". + :type function_aggregation: str or ~azure.search.documents.models.ScoringFunctionAggregation """ _validation = { @@ -3368,7 +3370,7 @@ def __init__( **kwargs ): super(ScoringProfile, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.text_weights = kwargs.get('text_weights', None) self.functions = kwargs.get('functions', None) self.function_aggregation = kwargs.get('function_aggregation', None) @@ -3386,7 +3388,7 @@ class SearchError(msrest.serialization.Model): :ivar message: Required. A human-readable representation of the error. :vartype message: str :ivar details: An array of details about specific errors that led to this reported error. 
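# --- illustrative sketch, not part of the regenerated patch ---
# Both pattern-replace filters now require pattern and replacement
# (kwargs['pattern'] / kwargs['replacement'] above). The char filter rewrites
# the text stream before tokenization; the token filter rewrites tokens after.
from azure.search.documents._service._generated.models import (
    PatternReplaceCharFilter,
)

strip_dashes = PatternReplaceCharFilter(
    name="strip_dashes",  # hypothetical filter name
    pattern=r"-",
    replacement=" ",
)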
- :vartype details: list[~search_service_client.models.SearchError] + :vartype details: list[~azure.search.documents.models.SearchError] """ _validation = { @@ -3419,10 +3421,10 @@ class SearchField(msrest.serialization.Model): :param name: Required. The name of the field, which must be unique within the fields collection of the index or parent field. :type name: str - :param type: Required. The data type of the field. Possible values include: 'Edm.String', - 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', - 'Edm.GeographyPoint', 'Edm.ComplexType'. - :type type: str or ~search_service_client.models.SearchFieldDataType + :param type: Required. The data type of the field. Possible values include: "Edm.String", + "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", + "Edm.GeographyPoint", "Edm.ComplexType". + :type type: str or ~azure.search.documents.models.SearchFieldDataType :param key: A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete @@ -3473,70 +3475,70 @@ class SearchField(msrest.serialization.Model): :param analyzer: The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName + for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param search_analyzer: The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. Possible values - include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', - 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- - Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', - 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', - 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', - 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', - 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', - 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', - 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', - 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', - 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', - 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. 
- :type search_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh- + Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", + "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", + "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", + "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", + "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", + "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", + "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", + "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", + "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", + "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :type search_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param index_analyzer: The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. 
- Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', - 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- - Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', - 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', - 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', - 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', - 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', - 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', - 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', - 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', - 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', - 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. - :type index_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh- + Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", + "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", + "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", + "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", + "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", + "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", + "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", + "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", + "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :type index_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param synonym_maps: A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. 
Assigning a synonym map to a field ensures that query terms targeting that field are @@ -3545,7 +3547,7 @@ class SearchField(msrest.serialization.Model): :type synonym_maps: list[str] :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. - :type fields: list[~search_service_client.models.SearchField] + :type fields: list[~azure.search.documents.models.SearchField] """ _validation = { @@ -3574,8 +3576,8 @@ def __init__( **kwargs ): super(SearchField, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) + self.name = kwargs['name'] + self.type = kwargs['type'] self.key = kwargs.get('key', None) self.retrievable = kwargs.get('retrievable', None) self.searchable = kwargs.get('searchable', None) @@ -3597,25 +3599,25 @@ class SearchIndex(msrest.serialization.Model): :param name: Required. The name of the index. :type name: str :param fields: Required. The fields of the index. - :type fields: list[~search_service_client.models.SearchField] + :type fields: list[~azure.search.documents.models.SearchField] :param scoring_profiles: The scoring profiles for the index. - :type scoring_profiles: list[~search_service_client.models.ScoringProfile] + :type scoring_profiles: list[~azure.search.documents.models.ScoringProfile] :param default_scoring_profile: The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. :type default_scoring_profile: str :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :type cors_options: ~search_service_client.models.CorsOptions + :type cors_options: ~azure.search.documents.models.CorsOptions :param suggesters: The suggesters for the index. - :type suggesters: list[~search_service_client.models.Suggester] + :type suggesters: list[~azure.search.documents.models.Suggester] :param analyzers: The analyzers for the index. - :type analyzers: list[~search_service_client.models.LexicalAnalyzer] + :type analyzers: list[~azure.search.documents.models.LexicalAnalyzer] :param tokenizers: The tokenizers for the index. - :type tokenizers: list[~search_service_client.models.LexicalTokenizer] + :type tokenizers: list[~azure.search.documents.models.LexicalTokenizer] :param token_filters: The token filters for the index. - :type token_filters: list[~search_service_client.models.TokenFilter] + :type token_filters: list[~azure.search.documents.models.TokenFilter] :param char_filters: The character filters for the index. - :type char_filters: list[~search_service_client.models.CharFilter] + :type char_filters: list[~azure.search.documents.models.CharFilter] :param encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive @@ -3624,12 +3626,12 @@ class SearchIndex(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. 
- :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey :param similarity: The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. - :type similarity: ~search_service_client.models.Similarity + :type similarity: ~azure.search.documents.models.Similarity :param e_tag: The ETag of the index. :type e_tag: str """ @@ -3660,8 +3662,8 @@ def __init__( **kwargs ): super(SearchIndex, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.fields = kwargs.get('fields', None) + self.name = kwargs['name'] + self.fields = kwargs['fields'] self.scoring_profiles = kwargs.get('scoring_profiles', None) self.default_scoring_profile = kwargs.get('default_scoring_profile', None) self.cors_options = kwargs.get('cors_options', None) @@ -3692,15 +3694,15 @@ class SearchIndexer(msrest.serialization.Model): :param target_index_name: Required. The name of the index to which this indexer writes data. :type target_index_name: str :param schedule: The schedule for this indexer. - :type schedule: ~search_service_client.models.IndexingSchedule + :type schedule: ~azure.search.documents.models.IndexingSchedule :param parameters: Parameters for indexer execution. - :type parameters: ~search_service_client.models.IndexingParameters + :type parameters: ~azure.search.documents.models.IndexingParameters :param field_mappings: Defines mappings between fields in the data source and corresponding target fields in the index. - :type field_mappings: list[~search_service_client.models.FieldMapping] + :type field_mappings: list[~azure.search.documents.models.FieldMapping] :param output_field_mappings: Output field mappings are applied after enrichment and immediately before indexing. - :type output_field_mappings: list[~search_service_client.models.FieldMapping] + :type output_field_mappings: list[~azure.search.documents.models.FieldMapping] :param is_disabled: A value indicating whether the indexer is disabled. Default is false. :type is_disabled: bool :param e_tag: The ETag of the indexer. @@ -3732,11 +3734,11 @@ def __init__( **kwargs ): super(SearchIndexer, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.description = kwargs.get('description', None) - self.data_source_name = kwargs.get('data_source_name', None) + self.data_source_name = kwargs['data_source_name'] self.skillset_name = kwargs.get('skillset_name', None) - self.target_index_name = kwargs.get('target_index_name', None) + self.target_index_name = kwargs['target_index_name'] self.schedule = kwargs.get('schedule', None) self.parameters = kwargs.get('parameters', None) self.field_mappings = kwargs.get('field_mappings', None) @@ -3772,7 +3774,7 @@ def __init__( **kwargs ): super(SearchIndexerDataContainer, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.query = kwargs.get('query', None) @@ -3785,17 +3787,18 @@ class SearchIndexerDataSource(msrest.serialization.Model): :type name: str :param description: The description of the datasource. :type description: str - :param type: Required. The type of the datasource. Possible values include: 'azuresql', - 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. 
- :type type: str or ~search_service_client.models.SearchIndexerDataSourceType + :param type: Required. The type of the datasource. Possible values include: "azuresql", + "cosmosdb", "azureblob", "azuretable", "mysql". + :type type: str or ~azure.search.documents.models.SearchIndexerDataSourceType :param credentials: Required. Credentials for the datasource. - :type credentials: ~search_service_client.models.DataSourceCredentials + :type credentials: ~azure.search.documents.models.DataSourceCredentials :param container: Required. The data container for the datasource. - :type container: ~search_service_client.models.SearchIndexerDataContainer + :type container: ~azure.search.documents.models.SearchIndexerDataContainer :param data_change_detection_policy: The data change detection policy for the datasource. - :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy + :type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy :param data_deletion_detection_policy: The data deletion detection policy for the datasource. - :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy + :type data_deletion_detection_policy: + ~azure.search.documents.models.DataDeletionDetectionPolicy :param e_tag: The ETag of the data source. :type e_tag: str """ @@ -3823,11 +3826,11 @@ def __init__( **kwargs ): super(SearchIndexerDataSource, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.description = kwargs.get('description', None) - self.type = kwargs.get('type', None) - self.credentials = kwargs.get('credentials', None) - self.container = kwargs.get('container', None) + self.type = kwargs['type'] + self.credentials = kwargs['credentials'] + self.container = kwargs['container'] self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None) self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None) self.e_tag = kwargs.get('e_tag', None) @@ -3940,10 +3943,10 @@ class SearchIndexerSkillset(msrest.serialization.Model): :param description: Required. The description of the skillset. :type description: str :param skills: Required. A list of skills in the skillset. - :type skills: list[~search_service_client.models.SearchIndexerSkill] + :type skills: list[~azure.search.documents.models.SearchIndexerSkill] :param cognitive_services_account: Details about cognitive services to be used when running skills. - :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount + :type cognitive_services_account: ~azure.search.documents.models.CognitiveServicesAccount :param e_tag: The ETag of the skillset. :type e_tag: str """ @@ -3967,9 +3970,9 @@ def __init__( **kwargs ): super(SearchIndexerSkillset, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.description = kwargs.get('description', None) - self.skills = kwargs.get('skills', None) + self.name = kwargs['name'] + self.description = kwargs['description'] + self.skills = kwargs['skills'] self.cognitive_services_account = kwargs.get('cognitive_services_account', None) self.e_tag = kwargs.get('e_tag', None) @@ -3981,16 +3984,16 @@ class SearchIndexerStatus(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', - 'running'. 
- :vartype status: str or ~search_service_client.models.IndexerStatus + :ivar status: Required. Overall indexer status. Possible values include: "unknown", "error", + "running". + :vartype status: str or ~azure.search.documents.models.IndexerStatus :ivar last_result: The result of the most recent or an in-progress indexer execution. - :vartype last_result: ~search_service_client.models.IndexerExecutionResult + :vartype last_result: ~azure.search.documents.models.IndexerExecutionResult :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse chronological order. - :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] + :vartype execution_history: list[~azure.search.documents.models.IndexerExecutionResult] :ivar limits: Required. The execution limits for the indexer. - :vartype limits: ~search_service_client.models.SearchIndexerLimits + :vartype limits: ~azure.search.documents.models.SearchIndexerLimits """ _validation = { @@ -4087,7 +4090,7 @@ class SearchResourceEncryptionKey(msrest.serialization.Model): :param access_credentials: Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. :type access_credentials: - ~search_service_client.models.AzureActiveDirectoryApplicationCredentials + ~azure.search.documents.models.AzureActiveDirectoryApplicationCredentials """ _validation = { @@ -4108,9 +4111,9 @@ def __init__( **kwargs ): super(SearchResourceEncryptionKey, self).__init__(**kwargs) - self.key_name = kwargs.get('key_name', None) - self.key_version = kwargs.get('key_version', None) - self.vault_uri = kwargs.get('vault_uri', None) + self.key_name = kwargs['key_name'] + self.key_version = kwargs['key_version'] + self.vault_uri = kwargs['vault_uri'] self.access_credentials = kwargs.get('access_credentials', None) @@ -4134,14 +4137,14 @@ class SentimentSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', - 'ru', 'es', 'sv', 'tr'. - :type default_language_code: str or ~search_service_client.models.SentimentSkillLanguage + Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", + "ru", "es", "sv", "tr". + :type default_language_code: str or ~azure.search.documents.models.SentimentSkillLanguage """ _validation = { @@ -4175,19 +4178,19 @@ class ServiceCounters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param document_counter: Required. Total number of documents across all indexes in the service. - :type document_counter: ~search_service_client.models.ResourceCounter + :type document_counter: ~azure.search.documents.models.ResourceCounter :param index_counter: Required. Total number of indexes. 
- :type index_counter: ~search_service_client.models.ResourceCounter + :type index_counter: ~azure.search.documents.models.ResourceCounter :param indexer_counter: Required. Total number of indexers. - :type indexer_counter: ~search_service_client.models.ResourceCounter + :type indexer_counter: ~azure.search.documents.models.ResourceCounter :param data_source_counter: Required. Total number of data sources. - :type data_source_counter: ~search_service_client.models.ResourceCounter + :type data_source_counter: ~azure.search.documents.models.ResourceCounter :param storage_size_counter: Required. Total size of used storage in bytes. - :type storage_size_counter: ~search_service_client.models.ResourceCounter + :type storage_size_counter: ~azure.search.documents.models.ResourceCounter :param synonym_map_counter: Required. Total number of synonym maps. - :type synonym_map_counter: ~search_service_client.models.ResourceCounter + :type synonym_map_counter: ~azure.search.documents.models.ResourceCounter :param skillset_counter: Required. Total number of skillsets. - :type skillset_counter: ~search_service_client.models.ResourceCounter + :type skillset_counter: ~azure.search.documents.models.ResourceCounter """ _validation = { @@ -4215,13 +4218,13 @@ def __init__( **kwargs ): super(ServiceCounters, self).__init__(**kwargs) - self.document_counter = kwargs.get('document_counter', None) - self.index_counter = kwargs.get('index_counter', None) - self.indexer_counter = kwargs.get('indexer_counter', None) - self.data_source_counter = kwargs.get('data_source_counter', None) - self.storage_size_counter = kwargs.get('storage_size_counter', None) - self.synonym_map_counter = kwargs.get('synonym_map_counter', None) - self.skillset_counter = kwargs.get('skillset_counter', None) + self.document_counter = kwargs['document_counter'] + self.index_counter = kwargs['index_counter'] + self.indexer_counter = kwargs['indexer_counter'] + self.data_source_counter = kwargs['data_source_counter'] + self.storage_size_counter = kwargs['storage_size_counter'] + self.synonym_map_counter = kwargs['synonym_map_counter'] + self.skillset_counter = kwargs['skillset_counter'] class ServiceLimits(msrest.serialization.Model): @@ -4264,9 +4267,9 @@ class ServiceStatistics(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param counters: Required. Service level resource counters. - :type counters: ~search_service_client.models.ServiceCounters + :type counters: ~azure.search.documents.models.ServiceCounters :param limits: Required. Service level general limits. - :type limits: ~search_service_client.models.ServiceLimits + :type limits: ~azure.search.documents.models.ServiceLimits """ _validation = { @@ -4284,8 +4287,8 @@ def __init__( **kwargs ): super(ServiceStatistics, self).__init__(**kwargs) - self.counters = kwargs.get('counters', None) - self.limits = kwargs.get('limits', None) + self.counters = kwargs['counters'] + self.limits = kwargs['limits'] class ShaperSkill(SearchIndexerSkill): @@ -4308,10 +4311,10 @@ class ShaperSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
- :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -4413,11 +4416,11 @@ class SnowballTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. Possible values include: 'armenian', 'basque', - 'catalan', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'german2', 'hungarian', - 'italian', 'kp', 'lovins', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', - 'spanish', 'swedish', 'turkish'. - :type language: str or ~search_service_client.models.SnowballTokenFilterLanguage + :param language: Required. The language to use. Possible values include: "armenian", "basque", + "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", + "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", + "spanish", "swedish", "turkish". + :type language: str or ~azure.search.documents.models.SnowballTokenFilterLanguage """ _validation = { @@ -4438,7 +4441,7 @@ def __init__( ): super(SnowballTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' - self.language = kwargs.get('language', None) + self.language = kwargs['language'] class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy): @@ -4495,16 +4498,16 @@ class SplitSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt'. - :type default_language_code: str or ~search_service_client.models.SplitSkillLanguage + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :type default_language_code: str or ~azure.search.documents.models.SplitSkillLanguage :param text_split_mode: A value indicating which split mode to perform. Possible values - include: 'pages', 'sentences'. - :type text_split_mode: str or ~search_service_client.models.TextSplitMode + include: "pages", "sentences". + :type text_split_mode: str or ~azure.search.documents.models.TextSplitMode :param maximum_page_length: The desired maximum page length. Default is 10000. :type maximum_page_length: int """ @@ -4599,7 +4602,7 @@ def __init__( ): super(StemmerOverrideTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' - self.rules = kwargs.get('rules', None) + self.rules = kwargs['rules'] class StemmerTokenFilter(TokenFilter): @@ -4614,16 +4617,16 @@ class StemmerTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. 
Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'dutchKp', - 'english', 'lightEnglish', 'minimalEnglish', 'possessiveEnglish', 'porter2', 'lovins', - 'finnish', 'lightFinnish', 'french', 'lightFrench', 'minimalFrench', 'galician', - 'minimalGalician', 'german', 'german2', 'lightGerman', 'minimalGerman', 'greek', 'hindi', - 'hungarian', 'lightHungarian', 'indonesian', 'irish', 'italian', 'lightItalian', 'sorani', - 'latvian', 'norwegian', 'lightNorwegian', 'minimalNorwegian', 'lightNynorsk', 'minimalNynorsk', - 'portuguese', 'lightPortuguese', 'minimalPortuguese', 'portugueseRslp', 'romanian', 'russian', - 'lightRussian', 'spanish', 'lightSpanish', 'swedish', 'lightSwedish', 'turkish'. - :type language: str or ~search_service_client.models.StemmerTokenFilterLanguage + :param language: Required. The language to use. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", + "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", + "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", + "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", + "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", + "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", + "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". + :type language: str or ~azure.search.documents.models.StemmerTokenFilterLanguage """ _validation = { @@ -4644,7 +4647,7 @@ def __init__( ): super(StemmerTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' - self.language = kwargs.get('language', None) + self.language = kwargs['language'] class StopAnalyzer(LexicalAnalyzer): @@ -4699,12 +4702,12 @@ class StopwordsTokenFilter(TokenFilter): both be set. :type stopwords: list[str] :param stopwords_list: A predefined list of stopwords to use. This property and the stopwords - property cannot both be set. Default is English. Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'english', - 'finnish', 'french', 'galician', 'german', 'greek', 'hindi', 'hungarian', 'indonesian', - 'irish', 'italian', 'latvian', 'norwegian', 'persian', 'portuguese', 'romanian', 'russian', - 'sorani', 'spanish', 'swedish', 'thai', 'turkish'. - :type stopwords_list: str or ~search_service_client.models.StopwordsList + property cannot both be set. Default is English. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", + "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", + "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", + "sorani", "spanish", "swedish", "thai", "turkish". + :type stopwords_list: str or ~azure.search.documents.models.StopwordsList :param ignore_case: A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. 
:type ignore_case: bool @@ -4775,8 +4778,8 @@ def __init__( **kwargs ): super(Suggester, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.source_fields = kwargs.get('source_fields', None) + self.name = kwargs['name'] + self.source_fields = kwargs['source_fields'] class SynonymMap(msrest.serialization.Model): @@ -4802,7 +4805,7 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey :param e_tag: The ETag of the synonym map. :type e_tag: str """ @@ -4828,8 +4831,8 @@ def __init__( **kwargs ): super(SynonymMap, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.synonyms = kwargs.get('synonyms', None) + self.name = kwargs['name'] + self.synonyms = kwargs['synonyms'] self.encryption_key = kwargs.get('encryption_key', None) self.e_tag = kwargs.get('e_tag', None) @@ -4885,7 +4888,7 @@ def __init__( ): super(SynonymTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' - self.synonyms = kwargs.get('synonyms', None) + self.synonyms = kwargs['synonyms'] self.ignore_case = kwargs.get('ignore_case', False) self.expand = kwargs.get('expand', True) @@ -4904,11 +4907,11 @@ class TagScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the tag scoring function. - :type parameters: ~search_service_client.models.TagScoringParameters + :type parameters: ~azure.search.documents.models.TagScoringParameters """ _validation = { @@ -4932,7 +4935,7 @@ def __init__( ): super(TagScoringFunction, self).__init__(**kwargs) self.type = 'tag' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class TagScoringParameters(msrest.serialization.Model): @@ -4958,7 +4961,7 @@ def __init__( **kwargs ): super(TagScoringParameters, self).__init__(**kwargs) - self.tags_parameter = kwargs.get('tags_parameter', None) + self.tags_parameter = kwargs['tags_parameter'] class TextTranslationSkill(SearchIndexerSkill): @@ -4981,37 +4984,37 @@ class TextTranslationSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
- :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_to_language_code: Required. The language code to translate documents into for - documents that don't specify the to language explicitly. Possible values include: 'af', 'ar', - 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', - 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', - 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', - 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', - 'vi', 'cy', 'yua'. + documents that don't specify the to language explicitly. Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", + "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", + "vi", "cy", "yua". :type default_to_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param default_from_language_code: The language code to translate documents from for documents - that don't specify the from language explicitly. Possible values include: 'af', 'ar', 'bn', - 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', - 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', - 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', - 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', - 'yua'. + that don't specify the from language explicitly. Possible values include: "af", "ar", "bn", + "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", + "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", + "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", "sr-Cyrl", + "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", + "yua". :type default_from_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param suggested_from: The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the - automatic language detection is unsuccessful. Default is en. Possible values include: 'af', - 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', - 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', - 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', - 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', - 'ur', 'vi', 'cy', 'yua'. - :type suggested_from: str or ~search_service_client.models.TextTranslationSkillLanguage + automatic language detection is unsuccessful. Default is en. 
Possible values include: "af", + "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", + "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", + "sw", "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", + "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", + "ur", "vi", "cy", "yua". + :type suggested_from: str or ~azure.search.documents.models.TextTranslationSkillLanguage """ _validation = { @@ -5039,7 +5042,7 @@ def __init__( ): super(TextTranslationSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' - self.default_to_language_code = kwargs.get('default_to_language_code', None) + self.default_to_language_code = kwargs['default_to_language_code'] self.default_from_language_code = kwargs.get('default_from_language_code', None) self.suggested_from = kwargs.get('suggested_from', None) @@ -5067,7 +5070,7 @@ def __init__( **kwargs ): super(TextWeights, self).__init__(**kwargs) - self.weights = kwargs.get('weights', None) + self.weights = kwargs['weights'] class TruncateTokenFilter(TokenFilter): @@ -5202,10 +5205,10 @@ class WebApiSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param uri: Required. The url for the Web API. :type uri: str :param http_headers: The headers required to make the http request. @@ -5249,7 +5252,7 @@ def __init__( ): super(WebApiSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' - self.uri = kwargs.get('uri', None) + self.uri = kwargs['uri'] self.http_headers = kwargs.get('http_headers', None) self.http_method = kwargs.get('http_method', None) self.timeout = kwargs.get('timeout', None) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py index f21704937959..0dd3e1c170f3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- @@ -10,6 +12,8 @@ from azure.core.exceptions import HttpResponseError import msrest.serialization +from ._search_service_client_enums import * + class AnalyzedTokenInfo(msrest.serialization.Model): """Information about a token returned by an analyzer. @@ -65,33 +69,33 @@ class AnalyzeRequest(msrest.serialization.Model): :type text: str :param analyzer: The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are - mutually exclusive. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName + mutually exclusive. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param tokenizer: The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters - are mutually exclusive. Possible values include: 'classic', 'edgeNGram', 'keyword_v2', - 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', - 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName + are mutually exclusive. Possible values include: "classic", "edgeNGram", "keyword_v2", + "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", + "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". + :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName :param token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName] :param char_filters: An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. :type char_filters: list[str] @@ -133,7 +137,7 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param tokens: Required. The list of tokens returned by the analyzer specified in the request. 
- :type tokens: list[~search_service_client.models.AnalyzedTokenInfo] + :type tokens: list[~azure.search.documents.models.AnalyzedTokenInfo] """ _validation = { @@ -192,7 +196,7 @@ def __init__( **kwargs ): super(TokenFilter, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.name = name @@ -232,7 +236,7 @@ def __init__( **kwargs ): super(AsciiFoldingTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' self.preserve_original = preserve_original @@ -299,7 +303,7 @@ def __init__( **kwargs ): super(Similarity, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None class BM25Similarity(Similarity): @@ -337,7 +341,7 @@ def __init__( **kwargs ): super(BM25Similarity, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' + self.odata_type: str = '#Microsoft.Azure.Search.BM25Similarity' self.k1 = k1 self.b = b @@ -380,7 +384,7 @@ def __init__( **kwargs ): super(CharFilter, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.name = name @@ -397,7 +401,7 @@ class CjkBigramTokenFilter(TokenFilter): limited to 128 characters. :type name: str :param ignore_scripts: The scripts to ignore. - :type ignore_scripts: list[str or ~search_service_client.models.CjkBigramTokenFilterScripts] + :type ignore_scripts: list[str or ~azure.search.documents.models.CjkBigramTokenFilterScripts] :param output_unigrams: A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. :type output_unigrams: bool @@ -424,7 +428,7 @@ def __init__( **kwargs ): super(CjkBigramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.CjkBigramTokenFilter' self.ignore_scripts = ignore_scripts self.output_unigrams = output_unigrams @@ -451,7 +455,7 @@ def __init__( **kwargs ): super(ClassicSimilarity, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' + self.odata_type: str = '#Microsoft.Azure.Search.ClassicSimilarity' class LexicalTokenizer(msrest.serialization.Model): @@ -492,7 +496,7 @@ def __init__( **kwargs ): super(LexicalTokenizer, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.name = name @@ -533,7 +537,7 @@ def __init__( **kwargs ): super(ClassicTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.ClassicTokenizer' self.max_token_length = max_token_length @@ -572,7 +576,7 @@ def __init__( **kwargs ): super(CognitiveServicesAccount, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.description = description @@ -610,7 +614,7 @@ def __init__( **kwargs ): super(CognitiveServicesAccountKey, self).__init__(description=description, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' + self.odata_type: str = '#Microsoft.Azure.Search.CognitiveServicesByKey' self.key = key @@ -661,7 +665,7 @@ def __init__( **kwargs ): super(CommonGramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.CommonGramTokenFilter' 
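# [editorial note, not part of the regenerated file] Throughout the hunks in
# this file, the py3 models now annotate the discriminator assignment
# explicitly: abstract bases such as TokenFilter and LexicalAnalyzer set
#     self.odata_type: Optional[str] = None
# while every concrete subclass pins it to str with its OData type string, as
# in the CommonGramTokenFilter line just above. The annotation is assumed to
# rely on the module's existing typing imports; at runtime it is evaluated as
# a plain assignment and changes no behavior.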
self.common_words = common_words self.ignore_case = ignore_case self.use_query_mode = use_query_mode @@ -690,10 +694,10 @@ class SearchIndexerSkill(msrest.serialization.Model): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -726,7 +730,7 @@ def __init__( **kwargs ): super(SearchIndexerSkill, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.name = name self.description = description self.context = context @@ -754,10 +758,10 @@ class ConditionalSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -786,7 +790,7 @@ def __init__( **kwargs ): super(ConditionalSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' + self.odata_type: str = '#Microsoft.Skills.Util.ConditionalSkill' class CorsOptions(msrest.serialization.Model): @@ -862,7 +866,7 @@ def __init__( **kwargs ): super(LexicalAnalyzer, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.name = name @@ -879,15 +883,15 @@ class CustomAnalyzer(LexicalAnalyzer): 128 characters. :type name: str :param tokenizer: Required. The name of the tokenizer to use to divide continuous text into a - sequence of tokens, such as breaking a sentence into words. Possible values include: 'classic', - 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', - 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', - 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.LexicalTokenizerName + sequence of tokens, such as breaking a sentence into words. Possible values include: "classic", + "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", + "standard_v2", "uax_url_email", "whitespace". + :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName :param token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. 
- :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName] :param char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. @@ -918,7 +922,7 @@ def __init__( **kwargs ): super(CustomAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' + self.odata_type: str = '#Microsoft.Azure.Search.CustomAnalyzer' self.tokenizer = tokenizer self.token_filters = token_filters self.char_filters = char_filters @@ -954,7 +958,7 @@ def __init__( **kwargs ): super(DataChangeDetectionPolicy, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None class DataDeletionDetectionPolicy(msrest.serialization.Model): @@ -987,7 +991,7 @@ def __init__( **kwargs ): super(DataDeletionDetectionPolicy, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None class DataSourceCredentials(msrest.serialization.Model): @@ -1039,7 +1043,7 @@ def __init__( **kwargs ): super(DefaultCognitiveServicesAccount, self).__init__(description=description, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' + self.odata_type: str = '#Microsoft.Azure.Search.DefaultCognitiveServices' class DictionaryDecompounderTokenFilter(TokenFilter): @@ -1101,7 +1105,7 @@ def __init__( **kwargs ): super(DictionaryDecompounderTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' self.word_list = word_list self.min_word_size = min_word_size self.min_subword_size = min_subword_size @@ -1126,9 +1130,9 @@ class ScoringFunction(msrest.serialization.Model): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation """ _validation = { @@ -1157,7 +1161,7 @@ def __init__( **kwargs ): super(ScoringFunction, self).__init__(**kwargs) - self.type = None + self.type: Optional[str] = None self.field_name = field_name self.boost = boost self.interpolation = interpolation @@ -1177,11 +1181,11 @@ class DistanceScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the distance scoring function. 
- :type parameters: ~search_service_client.models.DistanceScoringParameters + :type parameters: ~azure.search.documents.models.DistanceScoringParameters """ _validation = { @@ -1209,7 +1213,7 @@ def __init__( **kwargs ): super(DistanceScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'distance' + self.type: str = 'distance' self.parameters = parameters @@ -1266,8 +1270,8 @@ class EdgeNGramTokenFilter(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1293,7 +1297,7 @@ def __init__( **kwargs ): super(EdgeNGramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' self.min_gram = min_gram self.max_gram = max_gram self.side = side @@ -1317,8 +1321,8 @@ class EdgeNGramTokenFilterV2(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1346,7 +1350,7 @@ def __init__( **kwargs ): super(EdgeNGramTokenFilterV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' + self.odata_type: str = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' self.min_gram = min_gram self.max_gram = max_gram self.side = side @@ -1370,7 +1374,7 @@ class EdgeNGramTokenizer(LexicalTokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. - :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -1398,7 +1402,7 @@ def __init__( **kwargs ): super(EdgeNGramTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.EdgeNGramTokenizer' self.min_gram = min_gram self.max_gram = max_gram self.token_chars = token_chars @@ -1439,7 +1443,7 @@ def __init__( **kwargs ): super(ElisionTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.ElisionTokenFilter' self.articles = articles @@ -1463,17 +1467,17 @@ class EntityRecognitionSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. 
The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param categories: A list of entity categories that should be extracted. - :type categories: list[str or ~search_service_client.models.EntityCategory] + :type categories: list[str or ~azure.search.documents.models.EntityCategory] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'ar', 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de', - 'el', 'hu', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr'. + Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", + "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". :type default_language_code: str or - ~search_service_client.models.EntityRecognitionSkillLanguage + ~azure.search.documents.models.EntityRecognitionSkillLanguage :param include_typeless_entities: Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not @@ -1519,7 +1523,7 @@ def __init__( **kwargs ): super(EntityRecognitionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' + self.odata_type: str = '#Microsoft.Skills.Text.EntityRecognitionSkill' self.categories = categories self.default_language_code = default_language_code self.include_typeless_entities = include_typeless_entities @@ -1537,7 +1541,7 @@ class FieldMapping(msrest.serialization.Model): name by default. :type target_field_name: str :param mapping_function: A function to apply to each source field value before indexing. - :type mapping_function: ~search_service_client.models.FieldMappingFunction + :type mapping_function: ~azure.search.documents.models.FieldMappingFunction """ _validation = { @@ -1611,11 +1615,11 @@ class FreshnessScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the freshness scoring function. 
- :type parameters: ~search_service_client.models.FreshnessScoringParameters + :type parameters: ~azure.search.documents.models.FreshnessScoringParameters """ _validation = { @@ -1643,7 +1647,7 @@ def __init__( **kwargs ): super(FreshnessScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'freshness' + self.type: str = 'freshness' self.parameters = parameters @@ -1736,7 +1740,7 @@ def __init__( **kwargs ): super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' + self.odata_type: str = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' self.high_water_mark_column_name = high_water_mark_column_name @@ -1760,17 +1764,17 @@ class ImageAnalysisSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'en', 'es', 'ja', 'pt', 'zh'. - :type default_language_code: str or ~search_service_client.models.ImageAnalysisSkillLanguage + Possible values include: "en", "es", "ja", "pt", "zh". + :type default_language_code: str or ~azure.search.documents.models.ImageAnalysisSkillLanguage :param visual_features: A list of visual features. - :type visual_features: list[str or ~search_service_client.models.VisualFeature] + :type visual_features: list[str or ~azure.search.documents.models.VisualFeature] :param details: A string indicating which domain-specific details to return. - :type details: list[str or ~search_service_client.models.ImageDetail] + :type details: list[str or ~azure.search.documents.models.ImageDetail] """ _validation = { @@ -1805,7 +1809,7 @@ def __init__( **kwargs ): super(ImageAnalysisSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' + self.odata_type: str = '#Microsoft.Skills.Vision.ImageAnalysisSkill' self.default_language_code = default_language_code self.visual_features = visual_features self.details = details @@ -1819,8 +1823,8 @@ class IndexerExecutionResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar status: Required. The outcome of this indexer execution. Possible values include: - 'transientFailure', 'success', 'inProgress', 'reset'. - :vartype status: str or ~search_service_client.models.IndexerExecutionStatus + "transientFailure", "success", "inProgress", "reset". + :vartype status: str or ~azure.search.documents.models.IndexerExecutionStatus :ivar error_message: The error message indicating the top-level error, if any. :vartype error_message: str :ivar start_time: The start time of this indexer execution. 
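A recurring change in these hunks is that the discriminator assignments gain explicit `str` annotations (`self.odata_type: str = ...`, `self.type: str = ...`), so type checkers stop narrowing the attribute to the base class's default literal. The discriminator is still pinned by each subclass constructor and is never passed by the caller. A minimal sketch of that pattern, assuming these generated models are importable from azure.search.documents.models as the updated docstring cross-references indicate:

    from azure.search.documents.models import EdgeNGramTokenFilterV2

    # name is required; min_gram/max_gram default to 1/2 (maximum 300 for the
    # V2 filter), and side may be "front" (the default) or "back".
    token_filter = EdgeNGramTokenFilterV2(
        name="my_edge_ngram",
        min_gram=2,
        max_gram=10,
        side="back",
    )

    # The OData discriminator is set by __init__, not by the caller.
    assert token_filter.odata_type == "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"
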
@@ -1828,9 +1832,9 @@ class IndexerExecutionResult(msrest.serialization.Model): :ivar end_time: The end time of this indexer execution, if the execution has already completed. :vartype end_time: ~datetime.datetime :ivar errors: Required. The item-level indexing errors. - :vartype errors: list[~search_service_client.models.SearchIndexerError] + :vartype errors: list[~azure.search.documents.models.SearchIndexerError] :ivar warnings: Required. The item-level indexing warnings. - :vartype warnings: list[~search_service_client.models.SearchIndexerWarning] + :vartype warnings: list[~azure.search.documents.models.SearchIndexerWarning] :ivar item_count: Required. The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. @@ -1971,7 +1975,7 @@ class InputFieldMappingEntry(msrest.serialization.Model): :param source_context: The source context used for selecting recursive inputs. :type source_context: str :param inputs: The recursive inputs used when creating a complex type. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] """ _validation = { @@ -2042,7 +2046,7 @@ def __init__( **kwargs ): super(KeepTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.KeepTokenFilter' self.keep_words = keep_words self.lower_case_keep_words = lower_case_keep_words @@ -2067,15 +2071,15 @@ class KeyPhraseExtractionSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl', 'pt- - PT', 'pt-BR', 'ru', 'es', 'sv'. + Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt- + PT", "pt-BR", "ru", "es", "sv". :type default_language_code: str or - ~search_service_client.models.KeyPhraseExtractionSkillLanguage + ~azure.search.documents.models.KeyPhraseExtractionSkillLanguage :param max_key_phrase_count: A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. 
:type max_key_phrase_count: int @@ -2111,7 +2115,7 @@ def __init__( **kwargs ): super(KeyPhraseExtractionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' + self.odata_type: str = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' self.default_language_code = default_language_code self.max_key_phrase_count = max_key_phrase_count @@ -2157,7 +2161,7 @@ def __init__( **kwargs ): super(KeywordMarkerTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' self.keywords = keywords self.ignore_case = ignore_case @@ -2197,7 +2201,7 @@ def __init__( **kwargs ): super(KeywordTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.KeywordTokenizer' self.buffer_size = buffer_size @@ -2238,7 +2242,7 @@ def __init__( **kwargs ): super(KeywordTokenizerV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' + self.odata_type: str = '#Microsoft.Azure.Search.KeywordTokenizerV2' self.max_token_length = max_token_length @@ -2262,10 +2266,10 @@ class LanguageDetectionSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -2294,7 +2298,7 @@ def __init__( **kwargs ): super(LanguageDetectionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' + self.odata_type: str = '#Microsoft.Skills.Text.LanguageDetectionSkill' class LengthTokenFilter(TokenFilter): @@ -2339,7 +2343,7 @@ def __init__( **kwargs ): super(LengthTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.LengthTokenFilter' self.min_length = min_length self.max_length = max_length @@ -2384,7 +2388,7 @@ def __init__( **kwargs ): super(LimitTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.LimitTokenFilter' self.max_token_count = max_token_count self.consume_all_tokens = consume_all_tokens @@ -2397,7 +2401,7 @@ class ListDataSourcesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar data_sources: Required. The datasources in the Search service. 
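Every skill model in this file shares the same contract: `inputs` is a list of InputFieldMappingEntry (a column in the source document or the output of an upstream skill) and `outputs` is a list of OutputFieldMappingEntry that downstream skills or index fields consume. A hedged sketch of wiring up KeyPhraseExtractionSkill; the `name`/`source` and `name`/`target_name` fields on the mapping entries are assumed from the wider model surface, since this hunk only spells out `source_context` and the recursive `inputs`:

    from azure.search.documents.models import (
        InputFieldMappingEntry,
        KeyPhraseExtractionSkill,
        OutputFieldMappingEntry,
    )

    skill = KeyPhraseExtractionSkill(
        name="extract-key-phrases",
        description="Pull key phrases out of merged document text",
        context="/document",
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="keyPhrases", target_name="keyPhrases")],
        default_language_code="en",  # one of the documented language codes
        max_key_phrase_count=10,     # omit to return all identified phrases
    )
    assert skill.odata_type == "#Microsoft.Skills.Text.KeyPhraseExtractionSkill"
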
- :vartype data_sources: list[~search_service_client.models.SearchIndexerDataSource] + :vartype data_sources: list[~azure.search.documents.models.SearchIndexerDataSource] """ _validation = { @@ -2424,7 +2428,7 @@ class ListIndexersResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexers: Required. The indexers in the Search service. - :vartype indexers: list[~search_service_client.models.SearchIndexer] + :vartype indexers: list[~azure.search.documents.models.SearchIndexer] """ _validation = { @@ -2451,7 +2455,7 @@ class ListIndexesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexes: Required. The indexes in the Search service. - :vartype indexes: list[~search_service_client.models.SearchIndex] + :vartype indexes: list[~azure.search.documents.models.SearchIndex] """ _validation = { @@ -2478,7 +2482,7 @@ class ListSkillsetsResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar skillsets: Required. The skillsets defined in the Search service. - :vartype skillsets: list[~search_service_client.models.SearchIndexerSkillset] + :vartype skillsets: list[~azure.search.documents.models.SearchIndexerSkillset] """ _validation = { @@ -2505,7 +2509,7 @@ class ListSynonymMapsResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar synonym_maps: Required. The synonym maps in the Search service. - :vartype synonym_maps: list[~search_service_client.models.SynonymMap] + :vartype synonym_maps: list[~azure.search.documents.models.SynonymMap] """ _validation = { @@ -2565,7 +2569,7 @@ def __init__( **kwargs ): super(LuceneStandardAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' + self.odata_type: str = '#Microsoft.Azure.Search.StandardAnalyzer' self.max_token_length = max_token_length self.stopwords = stopwords @@ -2606,7 +2610,7 @@ def __init__( **kwargs ): super(LuceneStandardTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.StandardTokenizer' self.max_token_length = max_token_length @@ -2647,7 +2651,7 @@ def __init__( **kwargs ): super(LuceneStandardTokenizerV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' + self.odata_type: str = '#Microsoft.Azure.Search.StandardTokenizerV2' self.max_token_length = max_token_length @@ -2665,11 +2669,11 @@ class MagnitudeScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the magnitude scoring function. 
- :type parameters: ~search_service_client.models.MagnitudeScoringParameters + :type parameters: ~azure.search.documents.models.MagnitudeScoringParameters """ _validation = { @@ -2697,7 +2701,7 @@ def __init__( **kwargs ): super(MagnitudeScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'magnitude' + self.type: str = 'magnitude' self.parameters = parameters @@ -2777,7 +2781,7 @@ def __init__( **kwargs ): super(MappingCharFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' + self.odata_type: str = '#Microsoft.Azure.Search.MappingCharFilter' self.mappings = mappings @@ -2801,10 +2805,10 @@ class MergeSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an empty space. :type insert_pre_tag: str @@ -2843,7 +2847,7 @@ def __init__( **kwargs ): super(MergeSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.MergeSkill' + self.odata_type: str = '#Microsoft.Skills.Text.MergeSkill' self.insert_pre_tag = insert_pre_tag self.insert_post_tag = insert_post_tag @@ -2869,13 +2873,13 @@ class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. Possible values include: - 'arabic', 'bangla', 'bulgarian', 'catalan', 'croatian', 'czech', 'danish', 'dutch', 'english', - 'estonian', 'finnish', 'french', 'german', 'greek', 'gujarati', 'hebrew', 'hindi', 'hungarian', - 'icelandic', 'indonesian', 'italian', 'kannada', 'latvian', 'lithuanian', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovak', 'slovenian', 'spanish', - 'swedish', 'tamil', 'telugu', 'turkish', 'ukrainian', 'urdu'. - :type language: str or ~search_service_client.models.MicrosoftStemmingTokenizerLanguage + "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", + "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", + "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". 
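The scoring functions follow the same subclass pattern: `field_name`, `boost`, and `interpolation` live on the ScoringFunction base, the `type` discriminator is pinned in `__init__` ('distance', 'freshness', 'magnitude'), and each variant contributes its own parameters object. A sketch for the magnitude variant; note that the `boosting_range_start`/`boosting_range_end` field names on MagnitudeScoringParameters are an assumption carried over from the service REST API, since that model's body is not shown in this hunk:

    from azure.search.documents.models import (
        MagnitudeScoringFunction,
        MagnitudeScoringParameters,
    )

    # Boost documents whose "rating" field falls between 3 and 5. boost must
    # be a positive number not equal to 1.0; interpolation defaults to "linear".
    func = MagnitudeScoringFunction(
        field_name="rating",
        boost=2.0,
        interpolation="quadratic",
        parameters=MagnitudeScoringParameters(
            boosting_range_start=3,  # assumed field name
            boosting_range_end=5,    # assumed field name
        ),
    )
    assert func.type == "magnitude"
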
+ :type language: str or ~azure.search.documents.models.MicrosoftStemmingTokenizerLanguage """ _validation = { @@ -2902,7 +2906,7 @@ def __init__( **kwargs ): super(MicrosoftLanguageStemmingTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' self.max_token_length = max_token_length self.is_search_tokenizer = is_search_tokenizer self.language = language @@ -2929,13 +2933,13 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. Possible values include: - 'bangla', 'bulgarian', 'catalan', 'chineseSimplified', 'chineseTraditional', 'croatian', - 'czech', 'danish', 'dutch', 'english', 'french', 'german', 'greek', 'gujarati', 'hindi', - 'icelandic', 'indonesian', 'italian', 'japanese', 'kannada', 'korean', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovenian', 'spanish', 'swedish', - 'tamil', 'telugu', 'thai', 'ukrainian', 'urdu', 'vietnamese'. - :type language: str or ~search_service_client.models.MicrosoftTokenizerLanguage + "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", + "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", + "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", + "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :type language: str or ~azure.search.documents.models.MicrosoftTokenizerLanguage """ _validation = { @@ -2962,7 +2966,7 @@ def __init__( **kwargs ): super(MicrosoftLanguageTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' self.max_token_length = max_token_length self.is_search_tokenizer = is_search_tokenizer self.language = language @@ -3008,7 +3012,7 @@ def __init__( **kwargs ): super(NGramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.NGramTokenFilter' self.min_gram = min_gram self.max_gram = max_gram @@ -3055,7 +3059,7 @@ def __init__( **kwargs ): super(NGramTokenFilterV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' + self.odata_type: str = '#Microsoft.Azure.Search.NGramTokenFilterV2' self.min_gram = min_gram self.max_gram = max_gram @@ -3078,7 +3082,7 @@ class NGramTokenizer(LexicalTokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. 
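The tokenizer models work the same way as the token filters above. For NGramTokenizer, `token_chars` restricts which character classes survive tokenization; the concrete TokenCharacterKind values used below ("letter", "digit") are an assumption, since the enum members are not enumerated in this hunk:

    from azure.search.documents.models import NGramTokenizer

    tokenizer = NGramTokenizer(
        name="my_ngram",
        min_gram=3,                       # default is 1
        max_gram=4,                       # default is 2, maximum is 300
        token_chars=["letter", "digit"],  # assumed TokenCharacterKind values
    )
    assert tokenizer.odata_type == "#Microsoft.Azure.Search.NGramTokenizer"
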
- :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -3106,7 +3110,7 @@ def __init__( **kwargs ): super(NGramTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.NGramTokenizer' self.min_gram = min_gram self.max_gram = max_gram self.token_chars = token_chars @@ -3132,18 +3136,18 @@ class OcrSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param text_extraction_algorithm: A value indicating which algorithm to use for extracting - text. Default is printed. Possible values include: 'printed', 'handwritten'. - :type text_extraction_algorithm: str or ~search_service_client.models.TextExtractionAlgorithm + text. Default is printed. Possible values include: "printed", "handwritten". + :type text_extraction_algorithm: str or ~azure.search.documents.models.TextExtractionAlgorithm :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', - 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr- - Latn', 'sk'. - :type default_language_code: str or ~search_service_client.models.OcrSkillLanguage + Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", "sr- + Latn", "sk". + :type default_language_code: str or ~azure.search.documents.models.OcrSkillLanguage :param should_detect_orientation: A value indicating to turn orientation detection on or not. Default is false. :type should_detect_orientation: bool @@ -3181,7 +3185,7 @@ def __init__( **kwargs ): super(OcrSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' + self.odata_type: str = '#Microsoft.Skills.Vision.OcrSkill' self.text_extraction_algorithm = text_extraction_algorithm self.default_language_code = default_language_code self.should_detect_orientation = should_detect_orientation @@ -3272,7 +3276,7 @@ def __init__( **kwargs ): super(PathHierarchyTokenizerV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' + self.odata_type: str = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' self.delimiter = delimiter self.replacement = replacement self.max_token_length = max_token_length @@ -3298,9 +3302,9 @@ class PatternAnalyzer(LexicalAnalyzer): :param pattern: A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. :type pattern: str - :param flags: Regular expression flags. 
Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags + :param flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :type flags: str or ~azure.search.documents.models.RegexFlags :param stopwords: A list of stopwords. :type stopwords: list[str] """ @@ -3330,7 +3334,7 @@ def __init__( **kwargs ): super(PatternAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' + self.odata_type: str = '#Microsoft.Azure.Search.PatternAnalyzer' self.lower_case_terms = lower_case_terms self.pattern = pattern self.flags = flags @@ -3378,7 +3382,7 @@ def __init__( **kwargs ): super(PatternCaptureTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' self.patterns = patterns self.preserve_original = preserve_original @@ -3424,7 +3428,7 @@ def __init__( **kwargs ): super(PatternReplaceCharFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PatternReplaceCharFilter' self.pattern = pattern self.replacement = replacement @@ -3470,7 +3474,7 @@ def __init__( **kwargs ): super(PatternReplaceTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' self.pattern = pattern self.replacement = replacement @@ -3490,9 +3494,9 @@ class PatternTokenizer(LexicalTokenizer): :param pattern: A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. :type pattern: str - :param flags: Regular expression flags. Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags + :param flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :type flags: str or ~azure.search.documents.models.RegexFlags :param group: The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. @@ -3522,7 +3526,7 @@ def __init__( **kwargs ): super(PatternTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.PatternTokenizer' self.pattern = pattern self.flags = flags self.group = group @@ -3541,9 +3545,9 @@ class PhoneticTokenFilter(TokenFilter): limited to 128 characters. :type name: str :param encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: - 'metaphone', 'doubleMetaphone', 'soundex', 'refinedSoundex', 'caverphone1', 'caverphone2', - 'cologne', 'nysiis', 'koelnerPhonetik', 'haasePhonetik', 'beiderMorse'. 
- :type encoder: str or ~search_service_client.models.PhoneticEncoder + "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", + "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse". + :type encoder: str or ~azure.search.documents.models.PhoneticEncoder :param replace_original_tokens: A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. :type replace_original_tokens: bool @@ -3570,7 +3574,7 @@ def __init__( **kwargs ): super(PhoneticTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PhoneticTokenFilter' self.encoder = encoder self.replace_original_tokens = replace_original_tokens @@ -3637,13 +3641,13 @@ class ScoringProfile(msrest.serialization.Model): :type name: str :param text_weights: Parameters that boost scoring based on text matches in certain index fields. - :type text_weights: ~search_service_client.models.TextWeights + :type text_weights: ~azure.search.documents.models.TextWeights :param functions: The collection of functions that influence the scoring of documents. - :type functions: list[~search_service_client.models.ScoringFunction] + :type functions: list[~azure.search.documents.models.ScoringFunction] :param function_aggregation: A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible - values include: 'sum', 'average', 'minimum', 'maximum', 'firstMatching'. - :type function_aggregation: str or ~search_service_client.models.ScoringFunctionAggregation + values include: "sum", "average", "minimum", "maximum", "firstMatching". + :type function_aggregation: str or ~azure.search.documents.models.ScoringFunctionAggregation """ _validation = { @@ -3685,7 +3689,7 @@ class SearchError(msrest.serialization.Model): :ivar message: Required. A human-readable representation of the error. :vartype message: str :ivar details: An array of details about specific errors that led to this reported error. - :vartype details: list[~search_service_client.models.SearchError] + :vartype details: list[~azure.search.documents.models.SearchError] """ _validation = { @@ -3718,10 +3722,10 @@ class SearchField(msrest.serialization.Model): :param name: Required. The name of the field, which must be unique within the fields collection of the index or parent field. :type name: str - :param type: Required. The data type of the field. Possible values include: 'Edm.String', - 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', - 'Edm.GeographyPoint', 'Edm.ComplexType'. - :type type: str or ~search_service_client.models.SearchFieldDataType + :param type: Required. The data type of the field. Possible values include: "Edm.String", + "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", + "Edm.GeographyPoint", "Edm.ComplexType". + :type type: str or ~azure.search.documents.models.SearchFieldDataType :param key: A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete @@ -3772,70 +3776,70 @@ class SearchField(msrest.serialization.Model): :param analyzer: The name of the analyzer to use for the field. 
This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.LexicalAnalyzerName + for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param search_analyzer: The name of the analyzer used at search time for the field. This option can be used only with searchable fields. 
It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. Possible values - include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', - 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- - Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', - 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', - 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', - 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', - 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', - 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', - 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', - 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', - 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', - 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. - :type search_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh- + Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", + "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", + "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", + "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", + "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", + "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", + "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", + "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", + "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", + "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :type search_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param index_analyzer: The name of the analyzer used at indexing time for the field. 
This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. - Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', - 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- - Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', - 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', - 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', - 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', - 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', - 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', - 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', - 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', - 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', - 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. - :type index_analyzer: str or ~search_service_client.models.LexicalAnalyzerName + Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh- + Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", + "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", + "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", + "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", + "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", + "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", + "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", + "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", + "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". 
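SearchField's `analyzer`, `search_analyzer`, and `index_analyzer` all draw from the same LexicalAnalyzerName set listed above: `analyzer` is immutable once chosen, while `search_analyzer` and `index_analyzer` must be set as a pair and can never name a language analyzer. A minimal index definition under the same import assumption as the earlier sketches:

    from azure.search.documents.models import SearchField, SearchIndex

    fields = [
        # Exactly one top-level field must be the key, and it must be Edm.String.
        SearchField(name="hotel_id", type="Edm.String", key=True),
        # A language analyzer such as "en.lucene" may only be assigned via
        # `analyzer`; analyzers apply only to searchable fields.
        SearchField(
            name="description",
            type="Edm.String",
            searchable=True,
            analyzer="en.lucene",
        ),
    ]
    index = SearchIndex(name="hotels", fields=fields)
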
+ :type index_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param synonym_maps: A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are @@ -3844,7 +3848,7 @@ class SearchField(msrest.serialization.Model): :type synonym_maps: list[str] :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. - :type fields: list[~search_service_client.models.SearchField] + :type fields: list[~azure.search.documents.models.SearchField] """ _validation = { @@ -3910,25 +3914,25 @@ class SearchIndex(msrest.serialization.Model): :param name: Required. The name of the index. :type name: str :param fields: Required. The fields of the index. - :type fields: list[~search_service_client.models.SearchField] + :type fields: list[~azure.search.documents.models.SearchField] :param scoring_profiles: The scoring profiles for the index. - :type scoring_profiles: list[~search_service_client.models.ScoringProfile] + :type scoring_profiles: list[~azure.search.documents.models.ScoringProfile] :param default_scoring_profile: The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. :type default_scoring_profile: str :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :type cors_options: ~search_service_client.models.CorsOptions + :type cors_options: ~azure.search.documents.models.CorsOptions :param suggesters: The suggesters for the index. - :type suggesters: list[~search_service_client.models.Suggester] + :type suggesters: list[~azure.search.documents.models.Suggester] :param analyzers: The analyzers for the index. - :type analyzers: list[~search_service_client.models.LexicalAnalyzer] + :type analyzers: list[~azure.search.documents.models.LexicalAnalyzer] :param tokenizers: The tokenizers for the index. - :type tokenizers: list[~search_service_client.models.LexicalTokenizer] + :type tokenizers: list[~azure.search.documents.models.LexicalTokenizer] :param token_filters: The token filters for the index. - :type token_filters: list[~search_service_client.models.TokenFilter] + :type token_filters: list[~azure.search.documents.models.TokenFilter] :param char_filters: The character filters for the index. - :type char_filters: list[~search_service_client.models.CharFilter] + :type char_filters: list[~azure.search.documents.models.CharFilter] :param encryption_key: A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive @@ -3937,12 +3941,12 @@ class SearchIndex(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. 
- :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey :param similarity: The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. - :type similarity: ~search_service_client.models.Similarity + :type similarity: ~azure.search.documents.models.Similarity :param e_tag: The ETag of the index. :type e_tag: str """ @@ -4019,15 +4023,15 @@ class SearchIndexer(msrest.serialization.Model): :param target_index_name: Required. The name of the index to which this indexer writes data. :type target_index_name: str :param schedule: The schedule for this indexer. - :type schedule: ~search_service_client.models.IndexingSchedule + :type schedule: ~azure.search.documents.models.IndexingSchedule :param parameters: Parameters for indexer execution. - :type parameters: ~search_service_client.models.IndexingParameters + :type parameters: ~azure.search.documents.models.IndexingParameters :param field_mappings: Defines mappings between fields in the data source and corresponding target fields in the index. - :type field_mappings: list[~search_service_client.models.FieldMapping] + :type field_mappings: list[~azure.search.documents.models.FieldMapping] :param output_field_mappings: Output field mappings are applied after enrichment and immediately before indexing. - :type output_field_mappings: list[~search_service_client.models.FieldMapping] + :type output_field_mappings: list[~azure.search.documents.models.FieldMapping] :param is_disabled: A value indicating whether the indexer is disabled. Default is false. :type is_disabled: bool :param e_tag: The ETag of the indexer. @@ -4127,17 +4131,18 @@ class SearchIndexerDataSource(msrest.serialization.Model): :type name: str :param description: The description of the datasource. :type description: str - :param type: Required. The type of the datasource. Possible values include: 'azuresql', - 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. - :type type: str or ~search_service_client.models.SearchIndexerDataSourceType + :param type: Required. The type of the datasource. Possible values include: "azuresql", + "cosmosdb", "azureblob", "azuretable", "mysql". + :type type: str or ~azure.search.documents.models.SearchIndexerDataSourceType :param credentials: Required. Credentials for the datasource. - :type credentials: ~search_service_client.models.DataSourceCredentials + :type credentials: ~azure.search.documents.models.DataSourceCredentials :param container: Required. The data container for the datasource. - :type container: ~search_service_client.models.SearchIndexerDataContainer + :type container: ~azure.search.documents.models.SearchIndexerDataContainer :param data_change_detection_policy: The data change detection policy for the datasource. - :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy + :type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy :param data_deletion_detection_policy: The data deletion detection policy for the datasource. 
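SearchIndexerDataSource is where the policy models from earlier hunks plug in: it ties a typed connection (credentials plus container) to optional change and deletion detection policies. A sketch using HighWaterMarkChangeDetectionPolicy from above; the `connection_string` field name on DataSourceCredentials is assumed rather than shown in this excerpt:

    from azure.search.documents.models import (
        DataSourceCredentials,
        HighWaterMarkChangeDetectionPolicy,
        SearchIndexerDataContainer,
        SearchIndexerDataSource,
    )

    data_source = SearchIndexerDataSource(
        name="hotel-rows",
        type="azuresql",  # one of the documented type values
        credentials=DataSourceCredentials(
            connection_string="<redacted>",  # assumed field name
        ),
        container=SearchIndexerDataContainer(name="Hotels"),
        data_change_detection_policy=HighWaterMarkChangeDetectionPolicy(
            high_water_mark_column_name="_ts",
        ),
    )
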
- :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy + :type data_deletion_detection_policy: + ~azure.search.documents.models.DataDeletionDetectionPolicy :param e_tag: The ETag of the data source. :type e_tag: str """ @@ -4291,10 +4296,10 @@ class SearchIndexerSkillset(msrest.serialization.Model): :param description: Required. The description of the skillset. :type description: str :param skills: Required. A list of skills in the skillset. - :type skills: list[~search_service_client.models.SearchIndexerSkill] + :type skills: list[~azure.search.documents.models.SearchIndexerSkill] :param cognitive_services_account: Details about cognitive services to be used when running skills. - :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount + :type cognitive_services_account: ~azure.search.documents.models.CognitiveServicesAccount :param e_tag: The ETag of the skillset. :type e_tag: str """ @@ -4338,16 +4343,16 @@ class SearchIndexerStatus(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', - 'running'. - :vartype status: str or ~search_service_client.models.IndexerStatus + :ivar status: Required. Overall indexer status. Possible values include: "unknown", "error", + "running". + :vartype status: str or ~azure.search.documents.models.IndexerStatus :ivar last_result: The result of the most recent or an in-progress indexer execution. - :vartype last_result: ~search_service_client.models.IndexerExecutionResult + :vartype last_result: ~azure.search.documents.models.IndexerExecutionResult :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse chronological order. - :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] + :vartype execution_history: list[~azure.search.documents.models.IndexerExecutionResult] :ivar limits: Required. The execution limits for the indexer. - :vartype limits: ~search_service_client.models.SearchIndexerLimits + :vartype limits: ~azure.search.documents.models.SearchIndexerLimits """ _validation = { @@ -4444,7 +4449,7 @@ class SearchResourceEncryptionKey(msrest.serialization.Model): :param access_credentials: Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. :type access_credentials: - ~search_service_client.models.AzureActiveDirectoryApplicationCredentials + ~azure.search.documents.models.AzureActiveDirectoryApplicationCredentials """ _validation = { @@ -4496,14 +4501,14 @@ class SentimentSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. 
- Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', - 'ru', 'es', 'sv', 'tr'. - :type default_language_code: str or ~search_service_client.models.SentimentSkillLanguage + Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", + "ru", "es", "sv", "tr". + :type default_language_code: str or ~azure.search.documents.models.SentimentSkillLanguage """ _validation = { @@ -4534,7 +4539,7 @@ def __init__( **kwargs ): super(SentimentSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' + self.odata_type: str = '#Microsoft.Skills.Text.SentimentSkill' self.default_language_code = default_language_code @@ -4544,19 +4549,19 @@ class ServiceCounters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param document_counter: Required. Total number of documents across all indexes in the service. - :type document_counter: ~search_service_client.models.ResourceCounter + :type document_counter: ~azure.search.documents.models.ResourceCounter :param index_counter: Required. Total number of indexes. - :type index_counter: ~search_service_client.models.ResourceCounter + :type index_counter: ~azure.search.documents.models.ResourceCounter :param indexer_counter: Required. Total number of indexers. - :type indexer_counter: ~search_service_client.models.ResourceCounter + :type indexer_counter: ~azure.search.documents.models.ResourceCounter :param data_source_counter: Required. Total number of data sources. - :type data_source_counter: ~search_service_client.models.ResourceCounter + :type data_source_counter: ~azure.search.documents.models.ResourceCounter :param storage_size_counter: Required. Total size of used storage in bytes. - :type storage_size_counter: ~search_service_client.models.ResourceCounter + :type storage_size_counter: ~azure.search.documents.models.ResourceCounter :param synonym_map_counter: Required. Total number of synonym maps. - :type synonym_map_counter: ~search_service_client.models.ResourceCounter + :type synonym_map_counter: ~azure.search.documents.models.ResourceCounter :param skillset_counter: Required. Total number of skillsets. - :type skillset_counter: ~search_service_client.models.ResourceCounter + :type skillset_counter: ~azure.search.documents.models.ResourceCounter """ _validation = { @@ -4646,9 +4651,9 @@ class ServiceStatistics(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param counters: Required. Service level resource counters. - :type counters: ~search_service_client.models.ServiceCounters + :type counters: ~azure.search.documents.models.ServiceCounters :param limits: Required. Service level general limits. - :type limits: ~search_service_client.models.ServiceLimits + :type limits: ~azure.search.documents.models.ServiceLimits """ _validation = { @@ -4693,10 +4698,10 @@ class ShaperSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
- :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -4725,7 +4730,7 @@ def __init__( **kwargs ): super(ShaperSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' + self.odata_type: str = '#Microsoft.Skills.Util.ShaperSkill' class ShingleTokenFilter(TokenFilter): @@ -4791,7 +4796,7 @@ def __init__( **kwargs ): super(ShingleTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.ShingleTokenFilter' self.max_shingle_size = max_shingle_size self.min_shingle_size = min_shingle_size self.output_unigrams = output_unigrams @@ -4812,11 +4817,11 @@ class SnowballTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. Possible values include: 'armenian', 'basque', - 'catalan', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'german2', 'hungarian', - 'italian', 'kp', 'lovins', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', - 'spanish', 'swedish', 'turkish'. - :type language: str or ~search_service_client.models.SnowballTokenFilterLanguage + :param language: Required. The language to use. Possible values include: "armenian", "basque", + "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", + "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", + "spanish", "swedish", "turkish". + :type language: str or ~azure.search.documents.models.SnowballTokenFilterLanguage """ _validation = { @@ -4839,7 +4844,7 @@ def __init__( **kwargs ): super(SnowballTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.SnowballTokenFilter' self.language = language @@ -4875,7 +4880,7 @@ def __init__( **kwargs ): super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' + self.odata_type: str = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' self.soft_delete_column_name = soft_delete_column_name self.soft_delete_marker_value = soft_delete_marker_value @@ -4900,16 +4905,16 @@ class SplitSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt'. 
- :type default_language_code: str or ~search_service_client.models.SplitSkillLanguage + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :type default_language_code: str or ~azure.search.documents.models.SplitSkillLanguage :param text_split_mode: A value indicating which split mode to perform. Possible values - include: 'pages', 'sentences'. - :type text_split_mode: str or ~search_service_client.models.TextSplitMode + include: "pages", "sentences". + :type text_split_mode: str or ~azure.search.documents.models.TextSplitMode :param maximum_page_length: The desired maximum page length. Default is 10000. :type maximum_page_length: int """ @@ -4946,7 +4951,7 @@ def __init__( **kwargs ): super(SplitSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.SplitSkill' + self.odata_type: str = '#Microsoft.Skills.Text.SplitSkill' self.default_language_code = default_language_code self.text_split_mode = text_split_mode self.maximum_page_length = maximum_page_length @@ -4975,7 +4980,7 @@ def __init__( **kwargs ): super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' + self.odata_type: str = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' class StemmerOverrideTokenFilter(TokenFilter): @@ -5015,7 +5020,7 @@ def __init__( **kwargs ): super(StemmerOverrideTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' self.rules = rules @@ -5031,16 +5036,16 @@ class StemmerTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'dutchKp', - 'english', 'lightEnglish', 'minimalEnglish', 'possessiveEnglish', 'porter2', 'lovins', - 'finnish', 'lightFinnish', 'french', 'lightFrench', 'minimalFrench', 'galician', - 'minimalGalician', 'german', 'german2', 'lightGerman', 'minimalGerman', 'greek', 'hindi', - 'hungarian', 'lightHungarian', 'indonesian', 'irish', 'italian', 'lightItalian', 'sorani', - 'latvian', 'norwegian', 'lightNorwegian', 'minimalNorwegian', 'lightNynorsk', 'minimalNynorsk', - 'portuguese', 'lightPortuguese', 'minimalPortuguese', 'portugueseRslp', 'romanian', 'russian', - 'lightRussian', 'spanish', 'lightSpanish', 'swedish', 'lightSwedish', 'turkish'. - :type language: str or ~search_service_client.models.StemmerTokenFilterLanguage + :param language: Required. The language to use. 
Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", + "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", + "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", + "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", + "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", + "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", + "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". + :type language: str or ~azure.search.documents.models.StemmerTokenFilterLanguage """ _validation = { @@ -5063,7 +5068,7 @@ def __init__( **kwargs ): super(StemmerTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.StemmerTokenFilter' self.language = language @@ -5102,7 +5107,7 @@ def __init__( **kwargs ): super(StopAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' + self.odata_type: str = '#Microsoft.Azure.Search.StopAnalyzer' self.stopwords = stopwords @@ -5122,12 +5127,12 @@ class StopwordsTokenFilter(TokenFilter): both be set. :type stopwords: list[str] :param stopwords_list: A predefined list of stopwords to use. This property and the stopwords - property cannot both be set. Default is English. Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'english', - 'finnish', 'french', 'galician', 'german', 'greek', 'hindi', 'hungarian', 'indonesian', - 'irish', 'italian', 'latvian', 'norwegian', 'persian', 'portuguese', 'romanian', 'russian', - 'sorani', 'spanish', 'swedish', 'thai', 'turkish'. - :type stopwords_list: str or ~search_service_client.models.StopwordsList + property cannot both be set. Default is English. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", + "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", + "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", + "sorani", "spanish", "swedish", "thai", "turkish". + :type stopwords_list: str or ~azure.search.documents.models.StopwordsList :param ignore_case: A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. :type ignore_case: bool @@ -5161,7 +5166,7 @@ def __init__( **kwargs ): super(StopwordsTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.StopwordsTokenFilter' self.stopwords = stopwords self.stopwords_list = stopwords_list self.ignore_case = ignore_case @@ -5234,7 +5239,7 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. 
- :type encryption_key: ~search_service_client.models.SearchResourceEncryptionKey + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey :param e_tag: The ETag of the synonym map. :type e_tag: str """ @@ -5326,7 +5331,7 @@ def __init__( **kwargs ): super(SynonymTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.SynonymTokenFilter' self.synonyms = synonyms self.ignore_case = ignore_case self.expand = expand @@ -5346,11 +5351,11 @@ class TagScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the tag scoring function. - :type parameters: ~search_service_client.models.TagScoringParameters + :type parameters: ~azure.search.documents.models.TagScoringParameters """ _validation = { @@ -5378,7 +5383,7 @@ def __init__( **kwargs ): super(TagScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'tag' + self.type: str = 'tag' self.parameters = parameters @@ -5430,37 +5435,37 @@ class TextTranslationSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_to_language_code: Required. The language code to translate documents into for - documents that don't specify the to language explicitly. Possible values include: 'af', 'ar', - 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', - 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', - 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', - 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', - 'vi', 'cy', 'yua'. + documents that don't specify the to language explicitly. Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", + "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", + "vi", "cy", "yua". 
:type default_to_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param default_from_language_code: The language code to translate documents from for documents - that don't specify the from language explicitly. Possible values include: 'af', 'ar', 'bn', - 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', - 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', - 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', - 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', - 'yua'. + that don't specify the from language explicitly. Possible values include: "af", "ar", "bn", + "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", + "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", + "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", "sr-Cyrl", + "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", + "yua". :type default_from_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param suggested_from: The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the - automatic language detection is unsuccessful. Default is en. Possible values include: 'af', - 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', - 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', - 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', - 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', - 'ur', 'vi', 'cy', 'yua'. - :type suggested_from: str or ~search_service_client.models.TextTranslationSkillLanguage + automatic language detection is unsuccessful. Default is en. Possible values include: "af", + "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", + "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", + "sw", "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", + "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", + "ur", "vi", "cy", "yua". 
+ :type suggested_from: str or ~azure.search.documents.models.TextTranslationSkillLanguage """ _validation = { @@ -5496,7 +5501,7 @@ def __init__( **kwargs ): super(TextTranslationSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' + self.odata_type: str = '#Microsoft.Skills.Text.TranslationSkill' self.default_to_language_code = default_to_language_code self.default_from_language_code = default_from_language_code self.suggested_from = suggested_from @@ -5566,7 +5571,7 @@ def __init__( **kwargs ): super(TruncateTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.TruncateTokenFilter' self.length = length @@ -5607,7 +5612,7 @@ def __init__( **kwargs ): super(UaxUrlEmailTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' self.max_token_length = max_token_length @@ -5647,7 +5652,7 @@ def __init__( **kwargs ): super(UniqueTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.UniqueTokenFilter' self.only_on_same_position = only_on_same_position @@ -5671,10 +5676,10 @@ class WebApiSkill(SearchIndexerSkill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param uri: Required. The url for the Web API. :type uri: str :param http_headers: The headers required to make the http request. 
@@ -5729,7 +5734,7 @@ def __init__( **kwargs ): super(WebApiSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' + self.odata_type: str = '#Microsoft.Skills.Custom.WebApiSkill' self.uri = uri self.http_headers = http_headers self.http_method = http_method @@ -5819,7 +5824,7 @@ def __init__( **kwargs ): super(WordDelimiterTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' self.generate_word_parts = generate_word_parts self.generate_number_parts = generate_number_parts self.catenate_words = catenate_words diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py index c96ed67d5b50..ee355eec0897 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py @@ -1,20 +1,85 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from enum import Enum -class SearchIndexerDataSourceType(str, Enum): - """Defines the type of a datasource. +class CjkBigramTokenFilterScripts(str, Enum): + """Scripts that can be ignored by CjkBigramTokenFilter. """ - azure_sql = "azuresql" #: Indicates an Azure SQL datasource. - cosmos_db = "cosmosdb" #: Indicates a CosmosDB datasource. - azure_blob = "azureblob" #: Indicates a Azure Blob datasource. - azure_table = "azuretable" #: Indicates a Azure Table datasource. - my_sql = "mysql" #: Indicates a MySql datasource. + han = "han" #: Ignore Han script when forming bigrams of CJK terms. + hiragana = "hiragana" #: Ignore Hiragana script when forming bigrams of CJK terms. + katakana = "katakana" #: Ignore Katakana script when forming bigrams of CJK terms. + hangul = "hangul" #: Ignore Hangul script when forming bigrams of CJK terms. + +class EdgeNGramTokenFilterSide(str, Enum): + """Specifies which side of the input an n-gram should be generated from. + """ + + front = "front" #: Specifies that the n-gram should be generated from the front of the input. + back = "back" #: Specifies that the n-gram should be generated from the back of the input. + +class EntityCategory(str, Enum): + """A string indicating what entity categories to return. + """ + + location = "location" #: Entities describing a physical location. + organization = "organization" #: Entities describing an organization. + person = "person" #: Entities describing a person. + quantity = "quantity" #: Entities describing a quantity. + datetime = "datetime" #: Entities describing a date and time. + url = "url" #: Entities describing a URL. 
+ email = "email" #: Entities describing an email address. + +class EntityRecognitionSkillLanguage(str, Enum): + """The language codes supported for input text by EntityRecognitionSkill. + """ + + ar = "ar" #: Arabic. + cs = "cs" #: Czech. + zh_hans = "zh-Hans" #: Chinese-Simplified. + zh_hant = "zh-Hant" #: Chinese-Traditional. + da = "da" #: Danish. + nl = "nl" #: Dutch. + en = "en" #: English. + fi = "fi" #: Finnish. + fr = "fr" #: French. + de = "de" #: German. + el = "el" #: Greek. + hu = "hu" #: Hungarian. + it = "it" #: Italian. + ja = "ja" #: Japanese. + ko = "ko" #: Korean. + no = "no" #: Norwegian (Bokmaal). + pl = "pl" #: Polish. + pt = "pt-PT" #: Portuguese (Portugal). + pt_br = "pt-BR" #: Portuguese (Brazil). + ru = "ru" #: Russian. + es = "es" #: Spanish. + sv = "sv" #: Swedish. + tr = "tr" #: Turkish. + +class ImageAnalysisSkillLanguage(str, Enum): + """The language codes supported for input by ImageAnalysisSkill. + """ + + en = "en" #: English. + es = "es" #: Spanish. + ja = "ja" #: Japanese. + pt = "pt" #: Portuguese. + zh = "zh" #: Chinese. + +class ImageDetail(str, Enum): + """A string indicating which domain-specific details to return. + """ + + celebrities = "celebrities" #: Details recognized as celebrities. + landmarks = "landmarks" #: Details recognized as landmarks. class IndexerExecutionStatus(str, Enum): """Represents the status of an individual indexer execution. @@ -25,18 +90,34 @@ class IndexerExecutionStatus(str, Enum): in_progress = "inProgress" #: Indexer execution is in progress. reset = "reset" #: Indexer has been reset. -class SearchFieldDataType(str, Enum): - """Defines the data type of a field in a search index. +class IndexerStatus(str, Enum): + """Represents the overall indexer status. """ - string = "Edm.String" #: Indicates that a field contains a string. - int32 = "Edm.Int32" #: Indicates that a field contains a 32-bit signed integer. - int64 = "Edm.Int64" #: Indicates that a field contains a 64-bit signed integer. - double = "Edm.Double" #: Indicates that a field contains an IEEE double-precision floating point number. - boolean = "Edm.Boolean" #: Indicates that a field contains a Boolean value (true or false). - date_time_offset = "Edm.DateTimeOffset" #: Indicates that a field contains a date/time value, including timezone information. - geography_point = "Edm.GeographyPoint" #: Indicates that a field contains a geo-location in terms of longitude and latitude. - complex = "Edm.ComplexType" #: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. + unknown = "unknown" #: Indicates that the indexer is in an unknown state. + error = "error" #: Indicates that the indexer experienced an error that cannot be corrected without human intervention. + running = "running" #: Indicates that the indexer is running normally. + +class KeyPhraseExtractionSkillLanguage(str, Enum): + """The language codes supported for input text by KeyPhraseExtractionSkill. + """ + + da = "da" #: Danish. + nl = "nl" #: Dutch. + en = "en" #: English. + fi = "fi" #: Finnish. + fr = "fr" #: French. + de = "de" #: German. + it = "it" #: Italian. + ja = "ja" #: Japanese. + ko = "ko" #: Korean. + no = "no" #: Norwegian (Bokmaal). + pl = "pl" #: Polish. + pt = "pt-PT" #: Portuguese (Portugal). + pt_br = "pt-BR" #: Portuguese (Brazil). + ru = "ru" #: Russian. + es = "es" #: Spanish. + sv = "sv" #: Swedish. class LexicalAnalyzerName(str, Enum): """Defines the names of all text analyzers supported by Azure Cognitive Search. 
@@ -136,115 +217,6 @@ class LexicalAnalyzerName(str, Enum): stop = "stop" #: Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html. whitespace = "whitespace" #: An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html. -class ScoringFunctionInterpolation(str, Enum): - """Defines the function used to interpolate score boosting across a range of documents. - """ - - linear = "linear" #: Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions. - constant = "constant" #: Boosts scores by a constant factor. - quadratic = "quadratic" #: Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions. - logarithmic = "logarithmic" #: Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions. - -class ScoringFunctionAggregation(str, Enum): - """Defines the aggregation function used to combine the results of all the scoring functions in a - scoring profile. - """ - - sum = "sum" #: Boost scores by the sum of all scoring function results. - average = "average" #: Boost scores by the average of all scoring function results. - minimum = "minimum" #: Boost scores by the minimum of all scoring function results. - maximum = "maximum" #: Boost scores by the maximum of all scoring function results. - first_matching = "firstMatching" #: Boost scores using the first applicable scoring function in the scoring profile. - -class TokenFilterName(str, Enum): - """Defines the names of all token filters supported by Azure Cognitive Search. - """ - - arabic_normalization = "arabic_normalization" #: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html. - apostrophe = "apostrophe" #: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html. - ascii_folding = "asciifolding" #: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html. - cjk_bigram = "cjk_bigram" #: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html. - cjk_width = "cjk_width" #: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html. - classic = "classic" #: Removes English possessives, and dots from acronyms. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html. - common_gram = "common_grams" #: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html. - edge_n_gram = "edgeNGram_v2" #: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html. - elision = "elision" #: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html. - german_normalization = "german_normalization" #: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html. - hindi_normalization = "hindi_normalization" #: Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html. - indic_normalization = "indic_normalization" #: Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html. - keyword_repeat = "keyword_repeat" #: Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html. - k_stem = "kstem" #: A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html. - length = "length" #: Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html. - limit = "limit" #: Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html. - lowercase = "lowercase" #: Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm. - n_gram = "nGram_v2" #: Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html. - persian_normalization = "persian_normalization" #: Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html. - phonetic = "phonetic" #: Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html. - porter_stem = "porter_stem" #: Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer. - reverse = "reverse" #: Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html. 
- scandinavian_normalization = "scandinavian_normalization" #: Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html. - scandinavian_folding_normalization = "scandinavian_folding" #: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html. - shingle = "shingle" #: Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html. - snowball = "snowball" #: A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html. - sorani_normalization = "sorani_normalization" #: Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html. - stemmer = "stemmer" #: Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters. - stopwords = "stopwords" #: Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html. - trim = "trim" #: Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html. - truncate = "truncate" #: Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html. - unique = "unique" #: Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html. - uppercase = "uppercase" #: Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html. - word_delimiter = "word_delimiter" #: Splits words into subwords and performs optional transformations on subword groups. - -class TokenCharacterKind(str, Enum): - """Represents classes of characters on which a token filter can operate. - """ - - letter = "letter" #: Keeps letters in tokens. - digit = "digit" #: Keeps digits in tokens. - whitespace = "whitespace" #: Keeps whitespace in tokens. - punctuation = "punctuation" #: Keeps punctuation in tokens. - symbol = "symbol" #: Keeps symbols in tokens. - -class CjkBigramTokenFilterScripts(str, Enum): - """Scripts that can be ignored by CjkBigramTokenFilter. - """ - - han = "han" #: Ignore Han script when forming bigrams of CJK terms. - hiragana = "hiragana" #: Ignore Hiragana script when forming bigrams of CJK terms. - katakana = "katakana" #: Ignore Katakana script when forming bigrams of CJK terms. - hangul = "hangul" #: Ignore Hangul script when forming bigrams of CJK terms. - -class VisualFeature(str, Enum): - """The strings indicating what visual feature types to return. - """ - - adult = "adult" #: Visual features recognized as adult persons. - brands = "brands" #: Visual features recognized as commercial brands. 
- categories = "categories" #: Categories. - description = "description" #: Description. - faces = "faces" #: Visual features recognized as people faces. - objects = "objects" #: Visual features recognized as objects. - tags = "tags" #: Tags. - -class ImageDetail(str, Enum): - """A string indicating which domain-specific details to return. - """ - - celebrities = "celebrities" #: Details recognized as celebrities. - landmarks = "landmarks" #: Details recognized as landmarks. - -class EntityCategory(str, Enum): - """A string indicating what entity categories to return. - """ - - location = "location" #: Entities describing a physical location. - organization = "organization" #: Entities describing an organization. - person = "person" #: Entities describing a person. - quantity = "quantity" #: Entities describing a quantity. - datetime = "datetime" #: Entities describing a date and time. - url = "url" #: Entities describing a URL. - email = "email" #: Entities describing an email address. - class LexicalTokenizerName(str, Enum): """Defines the names of all tokenizers supported by Azure Cognitive Search. """ @@ -263,269 +235,8 @@ class LexicalTokenizerName(str, Enum): uax_url_email = "uax_url_email" #: Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html. whitespace = "whitespace" #: Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html. -class RegexFlags(str, Enum): - """Defines flags that can be combined to control how regular expressions are used in the pattern - analyzer and pattern tokenizer. - """ - - canon_eq = "CANON_EQ" #: Enables canonical equivalence. - case_insensitive = "CASE_INSENSITIVE" #: Enables case-insensitive matching. - comments = "COMMENTS" #: Permits whitespace and comments in the pattern. - dot_all = "DOTALL" #: Enables dotall mode. - literal = "LITERAL" #: Enables literal parsing of the pattern. - multiline = "MULTILINE" #: Enables multiline mode. - unicode_case = "UNICODE_CASE" #: Enables Unicode-aware case folding. - unix_lines = "UNIX_LINES" #: Enables Unix lines mode. - -class KeyPhraseExtractionSkillLanguage(str, Enum): - """The language codes supported for input text by KeyPhraseExtractionSkill. - """ - - da = "da" #: Danish. - nl = "nl" #: Dutch. - en = "en" #: English. - fi = "fi" #: Finnish. - fr = "fr" #: French. - de = "de" #: German. - it = "it" #: Italian. - ja = "ja" #: Japanese. - ko = "ko" #: Korean. - no = "no" #: Norwegian (Bokmaal). - pl = "pl" #: Polish. - pt = "pt-PT" #: Portuguese (Portugal). - pt_br = "pt-BR" #: Portuguese (Brazil). - ru = "ru" #: Russian. - es = "es" #: Spanish. - sv = "sv" #: Swedish. - -class OcrSkillLanguage(str, Enum): - """The language codes supported for input by OcrSkill. - """ - - zh_hans = "zh-Hans" #: Chinese-Simplified. - zh_hant = "zh-Hant" #: Chinese-Traditional. - cs = "cs" #: Czech. - da = "da" #: Danish. - nl = "nl" #: Dutch. - en = "en" #: English. - fi = "fi" #: Finnish. - fr = "fr" #: French. - de = "de" #: German. - el = "el" #: Greek. - hu = "hu" #: Hungarian. - it = "it" #: Italian. - ja = "ja" #: Japanese. - ko = "ko" #: Korean. - nb = "nb" #: Norwegian (Bokmaal). - pl = "pl" #: Polish. - pt = "pt" #: Portuguese. - ru = "ru" #: Russian. - es = "es" #: Spanish. - sv = "sv" #: Swedish. - tr = "tr" #: Turkish. - ar = "ar" #: Arabic. - ro = "ro" #: Romanian. - sr_cyrl = "sr-Cyrl" #: Serbian (Cyrillic, Serbia). 
- sr_latn = "sr-Latn" #: Serbian (Latin, Serbia). - sk = "sk" #: Slovak. - -class ImageAnalysisSkillLanguage(str, Enum): - """The language codes supported for input by ImageAnalysisSkill. - """ - - en = "en" #: English. - es = "es" #: Spanish. - ja = "ja" #: Japanese. - pt = "pt" #: Portuguese. - zh = "zh" #: Chinese. - -class EntityRecognitionSkillLanguage(str, Enum): - """The language codes supported for input text by EntityRecognitionSkill. - """ - - ar = "ar" #: Arabic. - cs = "cs" #: Czech. - zh_hans = "zh-Hans" #: Chinese-Simplified. - zh_hant = "zh-Hant" #: Chinese-Traditional. - da = "da" #: Danish. - nl = "nl" #: Dutch. - en = "en" #: English. - fi = "fi" #: Finnish. - fr = "fr" #: French. - de = "de" #: German. - el = "el" #: Greek. - hu = "hu" #: Hungarian. - it = "it" #: Italian. - ja = "ja" #: Japanese. - ko = "ko" #: Korean. - no = "no" #: Norwegian (Bokmaal). - pl = "pl" #: Polish. - pt = "pt-PT" #: Portuguese (Portugal). - pt_br = "pt-BR" #: Portuguese (Brazil). - ru = "ru" #: Russian. - es = "es" #: Spanish. - sv = "sv" #: Swedish. - tr = "tr" #: Turkish. - -class SentimentSkillLanguage(str, Enum): - """The language codes supported for input text by SentimentSkill. - """ - - da = "da" #: Danish. - nl = "nl" #: Dutch. - en = "en" #: English. - fi = "fi" #: Finnish. - fr = "fr" #: French. - de = "de" #: German. - el = "el" #: Greek. - it = "it" #: Italian. - no = "no" #: Norwegian (Bokmaal). - pl = "pl" #: Polish. - pt = "pt-PT" #: Portuguese (Portugal). - ru = "ru" #: Russian. - es = "es" #: Spanish. - sv = "sv" #: Swedish. - tr = "tr" #: Turkish. - -class SplitSkillLanguage(str, Enum): - """The language codes supported for input text by SplitSkill. - """ - - da = "da" #: Danish. - de = "de" #: German. - en = "en" #: English. - es = "es" #: Spanish. - fi = "fi" #: Finnish. - fr = "fr" #: French. - it = "it" #: Italian. - ko = "ko" #: Korean. - pt = "pt" #: Portuguese. - -class TextTranslationSkillLanguage(str, Enum): - """The language codes supported for input text by TextTranslationSkill. - """ - - af = "af" #: Afrikaans. - ar = "ar" #: Arabic. - bn = "bn" #: Bangla. - bs = "bs" #: Bosnian (Latin). - bg = "bg" #: Bulgarian. - yue = "yue" #: Cantonese (Traditional). - ca = "ca" #: Catalan. - zh_hans = "zh-Hans" #: Chinese Simplified. - zh_hant = "zh-Hant" #: Chinese Traditional. - hr = "hr" #: Croatian. - cs = "cs" #: Czech. - da = "da" #: Danish. - nl = "nl" #: Dutch. - en = "en" #: English. - et = "et" #: Estonian. - fj = "fj" #: Fijian. - fil = "fil" #: Filipino. - fi = "fi" #: Finnish. - fr = "fr" #: French. - de = "de" #: German. - el = "el" #: Greek. - ht = "ht" #: Haitian Creole. - he = "he" #: Hebrew. - hi = "hi" #: Hindi. - mww = "mww" #: Hmong Daw. - hu = "hu" #: Hungarian. - is_enum = "is" #: Icelandic. - id = "id" #: Indonesian. - it = "it" #: Italian. - ja = "ja" #: Japanese. - sw = "sw" #: Kiswahili. - tlh = "tlh" #: Klingon. - ko = "ko" #: Korean. - lv = "lv" #: Latvian. - lt = "lt" #: Lithuanian. - mg = "mg" #: Malagasy. - ms = "ms" #: Malay. - mt = "mt" #: Maltese. - nb = "nb" #: Norwegian. - fa = "fa" #: Persian. - pl = "pl" #: Polish. - pt = "pt" #: Portuguese. - otq = "otq" #: Queretaro Otomi. - ro = "ro" #: Romanian. - ru = "ru" #: Russian. - sm = "sm" #: Samoan. - sr_cyrl = "sr-Cyrl" #: Serbian (Cyrillic). - sr_latn = "sr-Latn" #: Serbian (Latin). - sk = "sk" #: Slovak. - sl = "sl" #: Slovenian. - es = "es" #: Spanish. - sv = "sv" #: Swedish. - ty = "ty" #: Tahitian. - ta = "ta" #: Tamil. - te = "te" #: Telugu. - th = "th" #: Thai. 
- to = "to" #: Tongan. - tr = "tr" #: Turkish. - uk = "uk" #: Ukrainian. - ur = "ur" #: Urdu. - vi = "vi" #: Vietnamese. - cy = "cy" #: Welsh. - yua = "yua" #: Yucatec Maya. - -class IndexerStatus(str, Enum): - """Represents the overall indexer status. - """ - - unknown = "unknown" #: Indicates that the indexer is in an unknown state. - error = "error" #: Indicates that the indexer experienced an error that cannot be corrected without human intervention. - running = "running" #: Indicates that the indexer is running normally. - -class MicrosoftTokenizerLanguage(str, Enum): - """Lists the languages supported by the Microsoft language tokenizer. - """ - - bangla = "bangla" #: Selects the Microsoft tokenizer for Bangla. - bulgarian = "bulgarian" #: Selects the Microsoft tokenizer for Bulgarian. - catalan = "catalan" #: Selects the Microsoft tokenizer for Catalan. - chinese_simplified = "chineseSimplified" #: Selects the Microsoft tokenizer for Chinese (Simplified). - chinese_traditional = "chineseTraditional" #: Selects the Microsoft tokenizer for Chinese (Traditional). - croatian = "croatian" #: Selects the Microsoft tokenizer for Croatian. - czech = "czech" #: Selects the Microsoft tokenizer for Czech. - danish = "danish" #: Selects the Microsoft tokenizer for Danish. - dutch = "dutch" #: Selects the Microsoft tokenizer for Dutch. - english = "english" #: Selects the Microsoft tokenizer for English. - french = "french" #: Selects the Microsoft tokenizer for French. - german = "german" #: Selects the Microsoft tokenizer for German. - greek = "greek" #: Selects the Microsoft tokenizer for Greek. - gujarati = "gujarati" #: Selects the Microsoft tokenizer for Gujarati. - hindi = "hindi" #: Selects the Microsoft tokenizer for Hindi. - icelandic = "icelandic" #: Selects the Microsoft tokenizer for Icelandic. - indonesian = "indonesian" #: Selects the Microsoft tokenizer for Indonesian. - italian = "italian" #: Selects the Microsoft tokenizer for Italian. - japanese = "japanese" #: Selects the Microsoft tokenizer for Japanese. - kannada = "kannada" #: Selects the Microsoft tokenizer for Kannada. - korean = "korean" #: Selects the Microsoft tokenizer for Korean. - malay = "malay" #: Selects the Microsoft tokenizer for Malay. - malayalam = "malayalam" #: Selects the Microsoft tokenizer for Malayalam. - marathi = "marathi" #: Selects the Microsoft tokenizer for Marathi. - norwegian_bokmaal = "norwegianBokmaal" #: Selects the Microsoft tokenizer for Norwegian (Bokmål). - polish = "polish" #: Selects the Microsoft tokenizer for Polish. - portuguese = "portuguese" #: Selects the Microsoft tokenizer for Portuguese. - portuguese_brazilian = "portugueseBrazilian" #: Selects the Microsoft tokenizer for Portuguese (Brazil). - punjabi = "punjabi" #: Selects the Microsoft tokenizer for Punjabi. - romanian = "romanian" #: Selects the Microsoft tokenizer for Romanian. - russian = "russian" #: Selects the Microsoft tokenizer for Russian. - serbian_cyrillic = "serbianCyrillic" #: Selects the Microsoft tokenizer for Serbian (Cyrillic). - serbian_latin = "serbianLatin" #: Selects the Microsoft tokenizer for Serbian (Latin). - slovenian = "slovenian" #: Selects the Microsoft tokenizer for Slovenian. - spanish = "spanish" #: Selects the Microsoft tokenizer for Spanish. - swedish = "swedish" #: Selects the Microsoft tokenizer for Swedish. - tamil = "tamil" #: Selects the Microsoft tokenizer for Tamil. - telugu = "telugu" #: Selects the Microsoft tokenizer for Telugu. 
- thai = "thai" #: Selects the Microsoft tokenizer for Thai. - ukrainian = "ukrainian" #: Selects the Microsoft tokenizer for Ukrainian. - urdu = "urdu" #: Selects the Microsoft tokenizer for Urdu. - vietnamese = "vietnamese" #: Selects the Microsoft tokenizer for Vietnamese. - -class MicrosoftStemmingTokenizerLanguage(str, Enum): - """Lists the languages supported by the Microsoft language stemming tokenizer. +class MicrosoftStemmingTokenizerLanguage(str, Enum): + """Lists the languages supported by the Microsoft language stemming tokenizer. """ arabic = "arabic" #: Selects the Microsoft stemming tokenizer for Arabic. @@ -574,12 +285,83 @@ class MicrosoftStemmingTokenizerLanguage(str, Enum): ukrainian = "ukrainian" #: Selects the Microsoft stemming tokenizer for Ukrainian. urdu = "urdu" #: Selects the Microsoft stemming tokenizer for Urdu. -class EdgeNGramTokenFilterSide(str, Enum): - """Specifies which side of the input an n-gram should be generated from. +class MicrosoftTokenizerLanguage(str, Enum): + """Lists the languages supported by the Microsoft language tokenizer. + """ + + bangla = "bangla" #: Selects the Microsoft tokenizer for Bangla. + bulgarian = "bulgarian" #: Selects the Microsoft tokenizer for Bulgarian. + catalan = "catalan" #: Selects the Microsoft tokenizer for Catalan. + chinese_simplified = "chineseSimplified" #: Selects the Microsoft tokenizer for Chinese (Simplified). + chinese_traditional = "chineseTraditional" #: Selects the Microsoft tokenizer for Chinese (Traditional). + croatian = "croatian" #: Selects the Microsoft tokenizer for Croatian. + czech = "czech" #: Selects the Microsoft tokenizer for Czech. + danish = "danish" #: Selects the Microsoft tokenizer for Danish. + dutch = "dutch" #: Selects the Microsoft tokenizer for Dutch. + english = "english" #: Selects the Microsoft tokenizer for English. + french = "french" #: Selects the Microsoft tokenizer for French. + german = "german" #: Selects the Microsoft tokenizer for German. + greek = "greek" #: Selects the Microsoft tokenizer for Greek. + gujarati = "gujarati" #: Selects the Microsoft tokenizer for Gujarati. + hindi = "hindi" #: Selects the Microsoft tokenizer for Hindi. + icelandic = "icelandic" #: Selects the Microsoft tokenizer for Icelandic. + indonesian = "indonesian" #: Selects the Microsoft tokenizer for Indonesian. + italian = "italian" #: Selects the Microsoft tokenizer for Italian. + japanese = "japanese" #: Selects the Microsoft tokenizer for Japanese. + kannada = "kannada" #: Selects the Microsoft tokenizer for Kannada. + korean = "korean" #: Selects the Microsoft tokenizer for Korean. + malay = "malay" #: Selects the Microsoft tokenizer for Malay. + malayalam = "malayalam" #: Selects the Microsoft tokenizer for Malayalam. + marathi = "marathi" #: Selects the Microsoft tokenizer for Marathi. + norwegian_bokmaal = "norwegianBokmaal" #: Selects the Microsoft tokenizer for Norwegian (Bokmål). + polish = "polish" #: Selects the Microsoft tokenizer for Polish. + portuguese = "portuguese" #: Selects the Microsoft tokenizer for Portuguese. + portuguese_brazilian = "portugueseBrazilian" #: Selects the Microsoft tokenizer for Portuguese (Brazil). + punjabi = "punjabi" #: Selects the Microsoft tokenizer for Punjabi. + romanian = "romanian" #: Selects the Microsoft tokenizer for Romanian. + russian = "russian" #: Selects the Microsoft tokenizer for Russian. + serbian_cyrillic = "serbianCyrillic" #: Selects the Microsoft tokenizer for Serbian (Cyrillic). 
+ serbian_latin = "serbianLatin" #: Selects the Microsoft tokenizer for Serbian (Latin). + slovenian = "slovenian" #: Selects the Microsoft tokenizer for Slovenian. + spanish = "spanish" #: Selects the Microsoft tokenizer for Spanish. + swedish = "swedish" #: Selects the Microsoft tokenizer for Swedish. + tamil = "tamil" #: Selects the Microsoft tokenizer for Tamil. + telugu = "telugu" #: Selects the Microsoft tokenizer for Telugu. + thai = "thai" #: Selects the Microsoft tokenizer for Thai. + ukrainian = "ukrainian" #: Selects the Microsoft tokenizer for Ukrainian. + urdu = "urdu" #: Selects the Microsoft tokenizer for Urdu. + vietnamese = "vietnamese" #: Selects the Microsoft tokenizer for Vietnamese. + +class OcrSkillLanguage(str, Enum): + """The language codes supported for input by OcrSkill. """ - front = "front" #: Specifies that the n-gram should be generated from the front of the input. - back = "back" #: Specifies that the n-gram should be generated from the back of the input. + zh_hans = "zh-Hans" #: Chinese-Simplified. + zh_hant = "zh-Hant" #: Chinese-Traditional. + cs = "cs" #: Czech. + da = "da" #: Danish. + nl = "nl" #: Dutch. + en = "en" #: English. + fi = "fi" #: Finnish. + fr = "fr" #: French. + de = "de" #: German. + el = "el" #: Greek. + hu = "hu" #: Hungarian. + it = "it" #: Italian. + ja = "ja" #: Japanese. + ko = "ko" #: Korean. + nb = "nb" #: Norwegian (Bokmaal). + pl = "pl" #: Polish. + pt = "pt" #: Portuguese. + ru = "ru" #: Russian. + es = "es" #: Spanish. + sv = "sv" #: Swedish. + tr = "tr" #: Turkish. + ar = "ar" #: Arabic. + ro = "ro" #: Romanian. + sr_cyrl = "sr-Cyrl" #: Serbian (Cyrillic, Serbia). + sr_latn = "sr-Latn" #: Serbian (Latin, Serbia). + sk = "sk" #: Slovak. class PhoneticEncoder(str, Enum): """Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. @@ -597,6 +379,83 @@ class PhoneticEncoder(str, Enum): haase_phonetik = "haasePhonetik" #: Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. beider_morse = "beiderMorse" #: Encodes a token into a Beider-Morse value. +class RegexFlags(str, Enum): + """Defines flags that can be combined to control how regular expressions are used in the pattern + analyzer and pattern tokenizer. + """ + + canon_eq = "CANON_EQ" #: Enables canonical equivalence. + case_insensitive = "CASE_INSENSITIVE" #: Enables case-insensitive matching. + comments = "COMMENTS" #: Permits whitespace and comments in the pattern. + dot_all = "DOTALL" #: Enables dotall mode. + literal = "LITERAL" #: Enables literal parsing of the pattern. + multiline = "MULTILINE" #: Enables multiline mode. + unicode_case = "UNICODE_CASE" #: Enables Unicode-aware case folding. + unix_lines = "UNIX_LINES" #: Enables Unix lines mode. + +class ScoringFunctionAggregation(str, Enum): + """Defines the aggregation function used to combine the results of all the scoring functions in a + scoring profile. + """ + + sum = "sum" #: Boost scores by the sum of all scoring function results. + average = "average" #: Boost scores by the average of all scoring function results. + minimum = "minimum" #: Boost scores by the minimum of all scoring function results. + maximum = "maximum" #: Boost scores by the maximum of all scoring function results. + first_matching = "firstMatching" #: Boost scores using the first applicable scoring function in the scoring profile. + +class ScoringFunctionInterpolation(str, Enum): + """Defines the function used to interpolate score boosting across a range of documents. 
+ """ + + linear = "linear" #: Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions. + constant = "constant" #: Boosts scores by a constant factor. + quadratic = "quadratic" #: Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions. + logarithmic = "logarithmic" #: Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions. + +class SearchFieldDataType(str, Enum): + """Defines the data type of a field in a search index. + """ + + string = "Edm.String" #: Indicates that a field contains a string. + int32 = "Edm.Int32" #: Indicates that a field contains a 32-bit signed integer. + int64 = "Edm.Int64" #: Indicates that a field contains a 64-bit signed integer. + double = "Edm.Double" #: Indicates that a field contains an IEEE double-precision floating point number. + boolean = "Edm.Boolean" #: Indicates that a field contains a Boolean value (true or false). + date_time_offset = "Edm.DateTimeOffset" #: Indicates that a field contains a date/time value, including timezone information. + geography_point = "Edm.GeographyPoint" #: Indicates that a field contains a geo-location in terms of longitude and latitude. + complex = "Edm.ComplexType" #: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. + +class SearchIndexerDataSourceType(str, Enum): + """Defines the type of a datasource. + """ + + azure_sql = "azuresql" #: Indicates an Azure SQL datasource. + cosmos_db = "cosmosdb" #: Indicates a CosmosDB datasource. + azure_blob = "azureblob" #: Indicates a Azure Blob datasource. + azure_table = "azuretable" #: Indicates a Azure Table datasource. + my_sql = "mysql" #: Indicates a MySql datasource. + +class SentimentSkillLanguage(str, Enum): + """The language codes supported for input text by SentimentSkill. + """ + + da = "da" #: Danish. + nl = "nl" #: Dutch. + en = "en" #: English. + fi = "fi" #: Finnish. + fr = "fr" #: French. + de = "de" #: German. + el = "el" #: Greek. + it = "it" #: Italian. + no = "no" #: Norwegian (Bokmaal). + pl = "pl" #: Polish. + pt = "pt-PT" #: Portuguese (Portugal). + ru = "ru" #: Russian. + es = "es" #: Spanish. + sv = "sv" #: Swedish. + tr = "tr" #: Turkish. + class SnowballTokenFilterLanguage(str, Enum): """The language to use for a Snowball token filter. """ @@ -624,6 +483,20 @@ class SnowballTokenFilterLanguage(str, Enum): swedish = "swedish" #: Selects the Lucene Snowball stemming tokenizer for Swedish. turkish = "turkish" #: Selects the Lucene Snowball stemming tokenizer for Turkish. +class SplitSkillLanguage(str, Enum): + """The language codes supported for input text by SplitSkill. + """ + + da = "da" #: Danish. + de = "de" #: German. + en = "en" #: English. + es = "es" #: Spanish. + fi = "fi" #: Finnish. + fr = "fr" #: French. + it = "it" #: Italian. + ko = "ko" #: Korean. + pt = "pt" #: Portuguese. + class StemmerTokenFilterLanguage(str, Enum): """The language to use for a stemmer token filter. """ @@ -732,3 +605,132 @@ class TextSplitMode(str, Enum): pages = "pages" #: Split the text into individual pages. sentences = "sentences" #: Split the text into individual sentences. 
+ +class TextTranslationSkillLanguage(str, Enum): + """The language codes supported for input text by TextTranslationSkill. + """ + + af = "af" #: Afrikaans. + ar = "ar" #: Arabic. + bn = "bn" #: Bangla. + bs = "bs" #: Bosnian (Latin). + bg = "bg" #: Bulgarian. + yue = "yue" #: Cantonese (Traditional). + ca = "ca" #: Catalan. + zh_hans = "zh-Hans" #: Chinese Simplified. + zh_hant = "zh-Hant" #: Chinese Traditional. + hr = "hr" #: Croatian. + cs = "cs" #: Czech. + da = "da" #: Danish. + nl = "nl" #: Dutch. + en = "en" #: English. + et = "et" #: Estonian. + fj = "fj" #: Fijian. + fil = "fil" #: Filipino. + fi = "fi" #: Finnish. + fr = "fr" #: French. + de = "de" #: German. + el = "el" #: Greek. + ht = "ht" #: Haitian Creole. + he = "he" #: Hebrew. + hi = "hi" #: Hindi. + mww = "mww" #: Hmong Daw. + hu = "hu" #: Hungarian. + is_enum = "is" #: Icelandic. + id = "id" #: Indonesian. + it = "it" #: Italian. + ja = "ja" #: Japanese. + sw = "sw" #: Kiswahili. + tlh = "tlh" #: Klingon. + ko = "ko" #: Korean. + lv = "lv" #: Latvian. + lt = "lt" #: Lithuanian. + mg = "mg" #: Malagasy. + ms = "ms" #: Malay. + mt = "mt" #: Maltese. + nb = "nb" #: Norwegian. + fa = "fa" #: Persian. + pl = "pl" #: Polish. + pt = "pt" #: Portuguese. + otq = "otq" #: Queretaro Otomi. + ro = "ro" #: Romanian. + ru = "ru" #: Russian. + sm = "sm" #: Samoan. + sr_cyrl = "sr-Cyrl" #: Serbian (Cyrillic). + sr_latn = "sr-Latn" #: Serbian (Latin). + sk = "sk" #: Slovak. + sl = "sl" #: Slovenian. + es = "es" #: Spanish. + sv = "sv" #: Swedish. + ty = "ty" #: Tahitian. + ta = "ta" #: Tamil. + te = "te" #: Telugu. + th = "th" #: Thai. + to = "to" #: Tongan. + tr = "tr" #: Turkish. + uk = "uk" #: Ukrainian. + ur = "ur" #: Urdu. + vi = "vi" #: Vietnamese. + cy = "cy" #: Welsh. + yua = "yua" #: Yucatec Maya. + +class TokenCharacterKind(str, Enum): + """Represents classes of characters on which a token filter can operate. + """ + + letter = "letter" #: Keeps letters in tokens. + digit = "digit" #: Keeps digits in tokens. + whitespace = "whitespace" #: Keeps whitespace in tokens. + punctuation = "punctuation" #: Keeps punctuation in tokens. + symbol = "symbol" #: Keeps symbols in tokens. + +class TokenFilterName(str, Enum): + """Defines the names of all token filters supported by Azure Cognitive Search. + """ + + arabic_normalization = "arabic_normalization" #: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html. + apostrophe = "apostrophe" #: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html. + ascii_folding = "asciifolding" #: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html. + cjk_bigram = "cjk_bigram" #: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html. + cjk_width = "cjk_width" #: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html. + classic = "classic" #: Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html. + common_gram = "common_grams" #: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html. + edge_n_gram = "edgeNGram_v2" #: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html. + elision = "elision" #: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html. + german_normalization = "german_normalization" #: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html. + hindi_normalization = "hindi_normalization" #: Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html. + indic_normalization = "indic_normalization" #: Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html. + keyword_repeat = "keyword_repeat" #: Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html. + k_stem = "kstem" #: A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html. + length = "length" #: Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html. + limit = "limit" #: Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html. + lowercase = "lowercase" #: Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm. + n_gram = "nGram_v2" #: Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html. + persian_normalization = "persian_normalization" #: Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html. + phonetic = "phonetic" #: Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html. + porter_stem = "porter_stem" #: Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer. + reverse = "reverse" #: Reverses the token string. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html.
+    scandinavian_normalization = "scandinavian_normalization"  #: Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html.
+    scandinavian_folding_normalization = "scandinavian_folding"  #: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html.
+    shingle = "shingle"  #: Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html.
+    snowball = "snowball"  #: A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html.
+    sorani_normalization = "sorani_normalization"  #: Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html.
+    stemmer = "stemmer"  #: Language-specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters.
+    stopwords = "stopwords"  #: Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html.
+    trim = "trim"  #: Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html.
+    truncate = "truncate"  #: Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html.
+    unique = "unique"  #: Filters out tokens with the same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html.
+    uppercase = "uppercase"  #: Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html.
+    word_delimiter = "word_delimiter"  #: Splits words into subwords and performs optional transformations on subword groups.
+
+class VisualFeature(str, Enum):
+    """The strings indicating what visual feature types to return.
+    """
+
+    adult = "adult"  #: Visual features recognized as adult persons.
+    brands = "brands"  #: Visual features recognized as commercial brands.
+    categories = "categories"  #: Categories.
+    description = "description"  #: Description.
+    faces = "faces"  #: Visual features recognized as people's faces.
+    objects = "objects"  #: Visual features recognized as objects.
+    tags = "tags"  #: Tags.
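The enums above follow the usual AutoRest pattern of mixing str into Enum: members are genuine strings, so they compare against and serialize as their REST wire values, and codes that are not valid Python identifiers get adjusted member names (is_enum for "is", zh_hans for "zh-Hans"). A minimal standalone sketch of that behavior; TokenFilterName below is a two-member stand-in redefined here so the snippet runs without the package installed, not the full generated enum:

    import json
    from enum import Enum

    class TokenFilterName(str, Enum):
        # Stand-in mirroring the generated pattern above.
        lowercase = "lowercase"
        ascii_folding = "asciifolding"  # member name differs from the wire value

    # Members are real str instances, so plain comparisons and json encoding
    # both see the wire value directly.
    assert TokenFilterName.lowercase == "lowercase"
    assert TokenFilterName.ascii_folding.value == "asciifolding"
    print(json.dumps({"tokenFilters": [TokenFilterName.ascii_folding]}))
    # prints: {"tokenFilters": ["asciifolding"]}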
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py index d87e3cc4debb..83a82e8a47f0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py index 564a0da54a45..f4c91d48c7af 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class DataSourcesOperations(object): """DataSourcesOperations operations. @@ -22,7 +28,7 @@ class DataSourcesOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -52,7 +58,7 @@ def create_or_update( :param data_source_name: The name of the datasource to create or update. :type data_source_name: str :param data_source: The definition of the datasource to create or update. 
- :type data_source: ~search_service_client.models.SearchIndexerDataSource + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. :type if_match: str @@ -60,23 +66,25 @@ def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerDataSource or ~search_service_client.models.SearchIndexerDataSource + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -96,8 +104,8 @@ def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -124,7 +132,7 @@ def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore def delete( self, @@ -146,14 +154,15 @@ def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -161,7 +170,7 @@ def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -194,7 +203,7 @@ def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore def get( self, @@ -208,14 +217,15 @@ def get( :param data_source_name: The name of the datasource to retrieve. :type data_source_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerDataSource + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -223,7 +233,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -256,7 +266,7 @@ def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore def list( self, @@ -272,14 +282,15 @@ def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListDataSourcesResult or the result of cls(response) - :rtype: ~search_service_client.models.ListDataSourcesResult + :rtype: ~azure.search.documents.models.ListDataSourcesResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListDataSourcesResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -287,7 +298,7 @@ def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -321,7 +332,7 @@ def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/datasources'} + list.metadata = {'url': '/datasources'} # type: ignore def create( self, @@ -333,24 +344,26 @@ def create( """Creates a new datasource. :param data_source: The definition of the datasource to create. - :type data_source: ~search_service_client.models.SearchIndexerDataSource + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerDataSource + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -364,8 +377,8 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -387,4 +400,4 @@ def create( return cls(pipeline_response, deserialized, {}) return deserialized - 
create.metadata = {'url': '/datasources'} + create.metadata = {'url': '/datasources'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py index 5d4c0ff5609c..5e2392a2806f 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class IndexersOperations(object): """IndexersOperations operations. @@ -22,7 +28,7 @@ class IndexersOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -49,14 +55,15 @@ def reset( :param indexer_name: The name of the indexer to reset. :type indexer_name: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -64,7 +71,7 @@ def reset( api_version = "2019-05-06-Preview" # Construct URL - url = self.reset.metadata['url'] + url = self.reset.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -93,7 +100,7 @@ def reset( if cls: return cls(pipeline_response, None, {}) - reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} + reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} # type: ignore def run( self, @@ -107,14 +114,15 @@ def run( :param indexer_name: The name of the indexer to run. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -122,7 +130,7 @@ def run( api_version = "2019-05-06-Preview" # Construct URL - url = self.run.metadata['url'] + url = self.run.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -151,7 +159,7 @@ def run( if cls: return cls(pipeline_response, None, {}) - run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'} + run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'} # type: ignore def create_or_update( self, @@ -168,7 +176,7 @@ def create_or_update( :param indexer_name: The name of the indexer to create or update. :type indexer_name: str :param indexer: The definition of the indexer to create or update. - :type indexer: ~search_service_client.models.SearchIndexer + :type indexer: ~azure.search.documents.models.SearchIndexer :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. :type if_match: str @@ -176,23 +184,25 @@ def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexer or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexer or ~search_service_client.models.SearchIndexer + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -212,8 +222,8 @@ def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -240,7 +250,7 @@ def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'} + create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore def delete( self, @@ -262,14 +272,15 @@ def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -277,7 +288,7 @@ def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -310,7 +321,7 @@ def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/indexers(\'{indexerName}\')'} + delete.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore def get( self, @@ -324,14 +335,15 @@ def get( :param indexer_name: The name of the indexer to retrieve. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexer or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexer + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -339,7 +351,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -372,7 +384,7 @@ def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/indexers(\'{indexerName}\')'} + get.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore def list( self, @@ -388,14 +400,15 @@ def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListIndexersResult or the result of cls(response) - :rtype: ~search_service_client.models.ListIndexersResult + :rtype: ~azure.search.documents.models.ListIndexersResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexersResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -403,7 +416,7 @@ def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -437,7 +450,7 @@ def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/indexers'} + list.metadata = {'url': '/indexers'} # type: ignore def create( self, @@ -449,24 +462,26 @@ def create( """Creates a new indexer. :param indexer: The definition of the indexer to create. - :type indexer: ~search_service_client.models.SearchIndexer + :type indexer: ~azure.search.documents.models.SearchIndexer :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexer or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexer + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -480,8 +495,8 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -503,7 +518,7 @@ def create( return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/indexers'} + create.metadata = {'url': '/indexers'} # type: ignore def 
get_status( self, @@ -517,14 +532,15 @@ def get_status( :param indexer_name: The name of the indexer for which to retrieve status. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerStatus or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerStatus + :rtype: ~azure.search.documents.models.SearchIndexerStatus :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerStatus"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -532,7 +548,7 @@ def get_status( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_status.metadata['url'] + url = self.get_status.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -565,4 +581,4 @@ def get_status( return cls(pipeline_response, deserialized, {}) return deserialized - get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} + get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py index bbfbfe51cfeb..159a57239391 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py @@ -1,20 +1,25 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from .. 
import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class IndexesOperations(object): """IndexesOperations operations. @@ -23,7 +28,7 @@ class IndexesOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -48,24 +53,26 @@ def create( """Creates a new search index. :param index: The definition of the index to create. - :type index: ~search_service_client.models.SearchIndex + :type index: ~azure.search.documents.models.SearchIndex :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndex or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndex + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -79,8 +86,8 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -102,7 +109,7 @@ def create( return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/indexes'} + create.metadata = {'url': '/indexes'} # type: ignore def list( self, @@ -114,79 +121,61 @@ def list( """Lists all indexes available for a search service. :param select: Selects which top-level properties of the index definitions to retrieve. - Specified as a comma-separated list of JSON property names, or '*' for all properties. The - default is all properties. + Specified as a comma-separated list of JSON property names, or '*' for all properties. The + default is all properties. 
:type select: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListIndexesResult or the result of cls(response) - :rtype: ~search_service_client.models.ListIndexesResult + :rtype: ~azure.search.documents.models.ListIndexesResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexesResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - else: - url = next_link - path_format_arguments = { - 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if select is not None: - query_parameters['$select'] = self._serialize.query("select", select, 'str') - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _x_ms_client_request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - header_parameters['Accept'] = 'application/json' - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def extract_data(pipeline_response): - deserialized = self._deserialize('ListIndexesResult', pipeline_response) - list_of_elem = deserialized.value - if cls: - list_of_elem = cls(list_of_elem) - return None, iter(list_of_elem) - - def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize(models.SearchError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map, model=error) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged( - get_next, extract_data - ) - list.metadata = {'url': '/indexes'} + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct 
headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ListIndexesResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/indexes'} # type: ignore def create_or_update( self, @@ -204,7 +193,7 @@ def create_or_update( :param index_name: The definition of the index to create or update. :type index_name: str :param index: The definition of the index to create or update. - :type index: ~search_service_client.models.SearchIndex + :type index: ~azure.search.documents.models.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of @@ -218,23 +207,25 @@ def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndex or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndex or ~search_service_client.models.SearchIndex + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -256,8 +247,8 @@ def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 
'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -284,7 +275,7 @@ def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} + create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore def delete( self, @@ -306,14 +297,15 @@ def delete( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -321,7 +313,7 @@ def delete( api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -354,7 +346,7 @@ def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/indexes(\'{indexName}\')'} + delete.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore def get( self, @@ -368,14 +360,15 @@ def get( :param index_name: The name of the index to retrieve. :type index_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndex or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndex + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -383,7 +376,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -416,7 +409,7 @@ def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/indexes(\'{indexName}\')'} + get.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore def get_statistics( self, @@ -430,14 +423,15 @@ def get_statistics( :param index_name: The name of the index for which to retrieve statistics. :type index_name: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: GetIndexStatisticsResult or the result of cls(response) - :rtype: ~search_service_client.models.GetIndexStatisticsResult + :rtype: ~azure.search.documents.models.GetIndexStatisticsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.GetIndexStatisticsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -445,7 +439,7 @@ def get_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_statistics.metadata['url'] + url = self.get_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -478,7 +472,7 @@ def get_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} + get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} # type: ignore def analyze( self, @@ -493,24 +487,26 @@ def analyze( :param index_name: The name of the index for which to test an analyzer. :type index_name: str :param request: The text and analyzer or analysis components to test. - :type request: ~search_service_client.models.AnalyzeRequest + :type request: ~azure.search.documents.models.AnalyzeRequest :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: AnalyzeResult or the result of cls(response) - :rtype: ~search_service_client.models.AnalyzeResult + :rtype: ~azure.search.documents.models.AnalyzeResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.analyze.metadata['url'] + url = self.analyze.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -525,8 +521,8 @@ def analyze( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -548,4 +544,4 @@ def analyze( return cls(pipeline_response, deserialized, {}) return deserialized - analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} + analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py index ce9beea5f278..f9dd44bd7b50 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. 
import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class SearchServiceClientOperationsMixin(object): @@ -26,14 +32,15 @@ def get_service_statistics( """Gets service level statistics for a search service. :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ServiceStatistics or the result of cls(response) - :rtype: ~search_service_client.models.ServiceStatistics + :rtype: ~azure.search.documents.models.ServiceStatistics :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceStatistics"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -41,7 +48,7 @@ def get_service_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_service_statistics.metadata['url'] + url = self.get_service_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -73,4 +80,4 @@ def get_service_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_service_statistics.metadata = {'url': '/servicestats'} + get_service_statistics.metadata = {'url': '/servicestats'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py index adb17bc79dc4..73c19892c504 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. 
import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class SkillsetsOperations(object): """SkillsetsOperations operations. @@ -22,7 +28,7 @@ class SkillsetsOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -53,7 +59,7 @@ def create_or_update( :type skillset_name: str :param skillset: The skillset containing one or more skills to create or update in a search service. - :type skillset: ~search_service_client.models.SearchIndexerSkillset + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset :param if_match: Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. :type if_match: str @@ -61,23 +67,25 @@ def create_or_update( if the ETag on the server does not match this value. :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~search_service_client.models.SearchIndexerSkillset or ~search_service_client.models.SearchIndexerSkillset + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -97,8 +105,8 @@ def create_or_update( if if_none_match is not None: header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -125,7 +133,7 @@ def create_or_update( return cls(pipeline_response, deserialized, {}) 
        return deserialized

-    create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'}
+    create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore

     def delete(
         self,
@@ -147,14 +155,15 @@ def delete(
         if the ETag on the server does not match this value.
         :type if_none_match: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType[None]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -162,7 +171,7 @@ def delete(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.delete.metadata['url']
+        url = self.delete.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
@@ -195,7 +204,7 @@ def delete(
         if cls:
             return cls(pipeline_response, None, {})

-    delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'}
+    delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore

     def get(
         self,
@@ -209,14 +218,15 @@ def get(
         :param skillset_name: The name of the skillset to retrieve.
         :type skillset_name: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: SearchIndexerSkillset or the result of cls(response)
-        :rtype: ~search_service_client.models.SearchIndexerSkillset
+        :rtype: ~azure.search.documents.models.SearchIndexerSkillset
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -224,7 +234,7 @@ def get(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.get.metadata['url']
+        url = self.get.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
@@ -257,7 +267,7 @@ def get(
             return cls(pipeline_response, deserialized, {})
         return deserialized

-    get.metadata = {'url': '/skillsets(\'{skillsetName}\')'}
+    get.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore

     def list(
         self,
@@ -273,14 +283,15 @@ def list(
         properties.
         :type select: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: ListSkillsetsResult or the result of cls(response)
-        :rtype: ~search_service_client.models.ListSkillsetsResult
+        :rtype: ~azure.search.documents.models.ListSkillsetsResult
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType["models.ListSkillsetsResult"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -288,7 +299,7 @@ def list(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.list.metadata['url']
+        url = self.list.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -322,7 +333,7 @@ def list(
             return cls(pipeline_response, deserialized, {})
         return deserialized

-    list.metadata = {'url': '/skillsets'}
+    list.metadata = {'url': '/skillsets'} # type: ignore

     def create(
         self,
@@ -334,24 +345,26 @@ def create(
         """Creates a new skillset in a search service.

         :param skillset: The skillset containing one or more skills to create in a search service.
-        :type skillset: ~search_service_client.models.SearchIndexerSkillset
+        :type skillset: ~azure.search.documents.models.SearchIndexerSkillset
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: SearchIndexerSkillset or the result of cls(response)
-        :rtype: ~search_service_client.models.SearchIndexerSkillset
+        :rtype: ~azure.search.documents.models.SearchIndexerSkillset
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"
+        content_type = kwargs.pop("content_type", "application/json")

         # Construct URL
-        url = self.create.metadata['url']
+        url = self.create.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -365,8 +378,8 @@ def create(
         header_parameters = {} # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
         header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {} # type: Dict[str, Any]
@@ -388,4 +401,4 @@ def create(
             return cls(pipeline_response, deserialized, {})
         return deserialized
-    create.metadata = {'url': '/skillsets'}
+    create.metadata = {'url': '/skillsets'} # type: ignore
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py
index ba6819c14159..c8e7b99b4e88 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py
@@ -1,9 +1,11 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
-from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
+from typing import TYPE_CHECKING
 import warnings

 from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
@@ -12,8 +14,12 @@
 from .. import models

-T = TypeVar('T')
-ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
+
+    T = TypeVar('T')
+    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

 class SynonymMapsOperations(object):
     """SynonymMapsOperations operations.
@@ -22,7 +28,7 @@ class SynonymMapsOperations(object):
     instantiates it for you and attaches it as an attribute.

     :ivar models: Alias to model classes used in this operation group.
-    :type models: ~search_service_client.models
+    :type models: ~azure.search.documents.models
     :param client: Client for service requests.
     :param config: Configuration of service client.
     :param serializer: An object model serializer.
@@ -52,7 +58,7 @@ def create_or_update(
         :param synonym_map_name: The name of the synonym map to create or update.
         :type synonym_map_name: str
         :param synonym_map: The definition of the synonym map to create or update.
-        :type synonym_map: ~search_service_client.models.SynonymMap
+        :type synonym_map: ~azure.search.documents.models.SynonymMap
         :param if_match: Defines the If-Match condition. The operation will be performed only if the
         ETag on the server matches this value.
         :type if_match: str
@@ -60,23 +66,25 @@
         if the ETag on the server does not match this value.
         :type if_none_match: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: SynonymMap or the result of cls(response)
-        :rtype: ~search_service_client.models.SynonymMap or ~search_service_client.models.SynonymMap
+        :rtype: ~azure.search.documents.models.SynonymMap
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         prefer = "return=representation"
         api_version = "2019-05-06-Preview"
+        content_type = kwargs.pop("content_type", "application/json")

         # Construct URL
-        url = self.create_or_update.metadata['url']
+        url = self.create_or_update.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'),
@@ -96,8 +104,8 @@ def create_or_update(
         if if_none_match is not None:
             header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
         header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
         header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {} # type: Dict[str, Any]
@@ -124,7 +132,7 @@ def create_or_update(
             return cls(pipeline_response, deserialized, {})
         return deserialized

-    create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'}
+    create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore

     def delete(
         self,
@@ -146,14 +154,15 @@ def delete(
         if the ETag on the server does not match this value.
         :type if_none_match: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType[None]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -161,7 +170,7 @@ def delete(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.delete.metadata['url']
+        url = self.delete.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'),
@@ -194,7 +203,7 @@ def delete(
         if cls:
             return cls(pipeline_response, None, {})

-    delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'}
+    delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore

     def get(
         self,
@@ -208,14 +217,15 @@ def get(
         :param synonym_map_name: The name of the synonym map to retrieve.
         :type synonym_map_name: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: SynonymMap or the result of cls(response)
-        :rtype: ~search_service_client.models.SynonymMap
+        :rtype: ~azure.search.documents.models.SynonymMap
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -223,7 +233,7 @@ def get(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.get.metadata['url']
+        url = self.get.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'),
@@ -256,7 +266,7 @@ def get(
             return cls(pipeline_response, deserialized, {})
         return deserialized

-    get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'}
+    get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore

     def list(
         self,
@@ -272,14 +282,15 @@ def list(
         properties.
         :type select: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: ListSynonymMapsResult or the result of cls(response)
-        :rtype: ~search_service_client.models.ListSynonymMapsResult
+        :rtype: ~azure.search.documents.models.ListSynonymMapsResult
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType["models.ListSynonymMapsResult"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -287,7 +298,7 @@ def list(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.list.metadata['url']
+        url = self.list.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -321,7 +332,7 @@ def list(
             return cls(pipeline_response, deserialized, {})
         return deserialized

-    list.metadata = {'url': '/synonymmaps'}
+    list.metadata = {'url': '/synonymmaps'} # type: ignore

     def create(
         self,
@@ -333,24 +344,26 @@ def create(
         """Creates a new synonym map.

         :param synonym_map: The definition of the synonym map to create.
-        :type synonym_map: ~search_service_client.models.SynonymMap
+        :type synonym_map: ~azure.search.documents.models.SynonymMap
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: SynonymMap or the result of cls(response)
-        :rtype: ~search_service_client.models.SynonymMap
+        :rtype: ~azure.search.documents.models.SynonymMap
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"
+        content_type = kwargs.pop("content_type", "application/json")

         # Construct URL
-        url = self.create.metadata['url']
+        url = self.create.metadata['url'] # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -364,8 +377,8 @@ def create(
         header_parameters = {} # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
         header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {} # type: Dict[str, Any]
@@ -387,4 +400,4 @@ def create(
             return cls(pipeline_response, deserialized, {})
         return deserialized

-    create.metadata = {'url': '/synonymmaps'}
+    create.metadata = {'url': '/synonymmaps'} # type: ignore
From ee34d270e170f2901f80a293c3fd107f1c5767be Mon Sep 17 00:00:00 2001
From: Bryan Van de Ven
Date: Tue, 19 May 2020 15:01:12 -0700
Subject: [PATCH 19/20] remove conflict merge junk

---
 .../azure-search-documents/azure/search/documents/__init__.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py
index 2eb9c9081574..ae2df416a676 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py
@@ -67,8 +67,6 @@
     FreshnessScoringParameters,
     GetIndexStatisticsResult,
     ImageAnalysisSkill,
-    Index,
-    Indexer,
     IndexingSchedule,
     IndexingParameters,
     InputFieldMappingEntry,
@@ -168,8 +166,6 @@
     "FreshnessScoringParameters",
     "GetIndexStatisticsResult",
     "ImageAnalysisSkill",
-    "Index",
-    "Indexer",
     "IndexingSchedule",
     "IndexingParameters",
     "IndexAction",

From bb848e1728c6e58903636d42ff778d604f4c1999 Mon Sep 17 00:00:00 2001
From: Bryan Van de Ven
Date: Tue, 19 May 2020 15:49:50 -0700
Subject: [PATCH 20/20] pylint

---
 .../azure-search-documents/azure/search/documents/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py
index ae2df416a676..7632f0b5daa7 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py
@@ -203,6 +203,7 @@
     "ScoringFunction",
     "ScoringProfile",
     "SearchDataSourcesClient",
+    "SearchClient",
     "SearchField",
     "SearchIndex",
     "SearchIndexer",
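
A note on the recurring `# type: ignore` comments these patches introduce: the regenerated code moves its typing imports under an `if TYPE_CHECKING:` guard so they are only evaluated by type checkers, keeps annotations in comment form, and suppresses the checker on attribute assignments like `create.metadata = {...}`, since type checkers do not model ad-hoc attributes on function objects. A minimal sketch of the pattern, outside the generated code (`get_widget` and its URL are hypothetical names, not part of the SDK):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Imported for annotations only; never executed at runtime.
        from typing import Any, Dict, Optional

    def get_widget(name, **kwargs):
        # type: (str, **Any) -> Optional[Dict[str, Any]]
        """Hypothetical operation using comment-style annotations."""
        url = get_widget.metadata['url']  # type: ignore
        return {"name": name, "url": url}

    # Function objects have no declared 'metadata' attribute, so the assignment
    # needs the same suppression the generated operations use.
    get_widget.metadata = {'url': '/widgets'}  # type: ignore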
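The other change repeated through every operation is the `error_map` handling: previously a caller-supplied `error_map` replaced the 404/409 defaults outright, whereas the regenerated code overlays the caller's map on top of them. A small standalone sketch of the difference (`call_with_overrides` is a hypothetical stand-in for an operation body):

    from azure.core.exceptions import (
        HttpResponseError,
        ResourceExistsError,
        ResourceNotFoundError,
    )

    def call_with_overrides(**kwargs):
        # Regenerated behavior: the defaults survive unless a status code is
        # explicitly overridden by the caller.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        return error_map

    # The 404/409 defaults remain alongside the caller's 412 mapping; the old
    # kwargs.pop('error_map', defaults) form would have discarded them instead.
    print(call_with_overrides(error_map={412: HttpResponseError}))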