diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 784855f89a95..6dc495c82d27 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -39,6 +39,8 @@
# PRLabel: %Communication
/sdk/communication/ @acsdevx-msft
+/sdk/communication/azure-communication-phonenumbers/ @RoyHerrod @danielav7 @whisper6284 @AlonsoMondal
+/sdk/communication/azure-communication-sms/ @RoyHerrod @arifibrahim4
# PRLabel: %KeyVault
/sdk/keyvault/ @schaabs @chlowell @mccoyp @YalinLi0312
diff --git a/eng/common/testproxy/docker-start-proxy.ps1 b/eng/common/testproxy/docker-start-proxy.ps1
index 6e0d0edd2b64..d32cb4b43f48 100644
--- a/eng/common/testproxy/docker-start-proxy.ps1
+++ b/eng/common/testproxy/docker-start-proxy.ps1
@@ -25,7 +25,7 @@ catch {
Write-Error "Please check your docker invocation and try running the script again."
}
-$SELECTED_IMAGE_TAG = "1084681"
+$SELECTED_IMAGE_TAG = "1108695"
$CONTAINER_NAME = "ambitious_azsdk_test_proxy"
$LINUX_IMAGE_SOURCE = "azsdkengsys.azurecr.io/engsys/testproxy-lin:${SELECTED_IMAGE_TAG}"
$WINDOWS_IMAGE_SOURCE = "azsdkengsys.azurecr.io/engsys/testproxy-win:${SELECTED_IMAGE_TAG}"
@@ -80,4 +80,4 @@ if ($Mode -eq "stop"){
docker container stop $CONTAINER_NAME
}
}
-}
\ No newline at end of file
+}
diff --git a/eng/guardian-tools/policheck/PolicheckExclusions.xml b/eng/guardian-tools/policheck/PolicheckExclusions.xml
new file mode 100644
index 000000000000..8aff06de6994
--- /dev/null
+++ b/eng/guardian-tools/policheck/PolicheckExclusions.xml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/eng/pipelines/aggregate-reports.yml b/eng/pipelines/aggregate-reports.yml
index 4c40cfcf7b4b..2aa8b4923389 100644
--- a/eng/pipelines/aggregate-reports.yml
+++ b/eng/pipelines/aggregate-reports.yml
@@ -8,60 +8,105 @@ pr:
include:
- eng/pipelines/aggregate-reports.yml
-jobs:
-- job: 'ValidateDependencies'
- variables:
+pool:
+ name: azsdk-pool-mms-win-2019-general
+ vmImage: MMS2019
+
+variables:
- template: ./templates/variables/globals.yml
- pool:
- name: azsdk-pool-mms-win-2019-general
- vmImage: MMS2019
-
- steps:
- - template: /eng/pipelines/templates/steps/analyze_dependency.yml
-
- - task: AzureFileCopy@2
- displayName: 'Upload dependency report'
- condition: and(succeededOrFailed(), eq(variables['System.TeamProject'], 'internal'))
- inputs:
- sourcePath: '$(Build.ArtifactStagingDirectory)/reports'
- azureSubscription: 'Azure SDK Artifacts'
- destination: AzureBlob
- storage: azuresdkartifacts
- containerName: 'azure-sdk-for-python'
- blobPrefix: dependencies
-
- - task: PowerShell@2
- displayName: "Verify Repository Resource Refs"
- inputs:
- pwsh: true
- workingDirectory: $(Build.SourcesDirectory)
- filePath: eng/common/scripts/Verify-Resource-Ref.ps1
-
- - task: securedevelopmentteam.vss-secure-development-tools.build-task-credscan.CredScan@3
- displayName: 'Run CredScan'
- condition: succeededOrFailed()
- inputs:
- suppressionsFile: 'eng\CredScanSuppression.json'
- - task: securedevelopmentteam.vss-secure-development-tools.build-task-postanalysis.PostAnalysis@2
- displayName: 'Post Analysis'
- condition: succeededOrFailed()
- inputs:
- GdnBreakAllTools: false
- GdnBreakGdnToolCredScan: true
- GdnBreakGdnToolCredScanSeverity: Error
- GdnBreakBaselineFiles: $(Build.SourcesDirectory)\eng\python.gdnbaselines
- GdnBreakBaselines: baseline
- # Used for generating baseline file.
- # GdnBreakOutputBaselineFile: python
- # GdnBreakOutputBaseline: baseline
- continueOnError: true
- - task: securedevelopmentteam.vss-secure-development-tools.build-task-publishsecurityanalysislogs.PublishSecurityAnalysisLogs@3
- displayName: 'Publish Security Analysis Logs'
- continueOnError: true
- condition: succeededOrFailed()
- - template: ../common/pipelines/templates/steps/verify-links.yml
- parameters:
- Directory: ""
- CheckLinkGuidance: $true
- Condition: succeededOrFailed()
+stages:
+ - stage: ValidateDependencies
+ displayName: Validate Dependencies
+
+ jobs:
+ - job: ValidateDependencies
+ timeoutInMinutes: 120
+ steps:
+
+ - template: /eng/pipelines/templates/steps/analyze_dependency.yml
+
+ - task: AzureFileCopy@2
+ displayName: 'Upload dependency report'
+ condition: and(succeededOrFailed(), eq(variables['System.TeamProject'], 'internal'))
+ inputs:
+ sourcePath: '$(Build.ArtifactStagingDirectory)/reports'
+ azureSubscription: 'Azure SDK Artifacts'
+ destination: AzureBlob
+ storage: azuresdkartifacts
+ containerName: 'azure-sdk-for-python'
+ blobPrefix: dependencies
+
+ - task: PowerShell@2
+ displayName: "Verify Repository Resource Refs"
+ inputs:
+ pwsh: true
+ workingDirectory: $(Build.SourcesDirectory)
+ filePath: eng/common/scripts/Verify-Resource-Ref.ps1
+
+ - template: ../common/pipelines/templates/steps/verify-links.yml
+ parameters:
+ Directory: ""
+ CheckLinkGuidance: $true
+ Condition: succeededOrFailed()
+
+ - stage: ComplianceTools
+ displayName: Compliance Tools
+ dependsOn: []
+
+ jobs:
+ - job: ComplianceTools
+ timeoutInMinutes: 120
+ steps:
+ - task: securedevelopmentteam.vss-secure-development-tools.build-task-credscan.CredScan@3
+ displayName: 'Run CredScan'
+ condition: succeededOrFailed()
+ inputs:
+ suppressionsFile: 'eng\CredScanSuppression.json'
+
+ - task: securedevelopmentteam.vss-secure-development-tools.build-task-postanalysis.PostAnalysis@2
+ displayName: 'Post Analysis'
+ condition: succeededOrFailed()
+ inputs:
+ GdnBreakAllTools: false
+ GdnBreakGdnToolCredScan: true
+ GdnBreakGdnToolCredScanSeverity: Error
+ GdnBreakBaselineFiles: $(Build.SourcesDirectory)\eng\python.gdnbaselines
+ GdnBreakBaselines: baseline
+ # Used for generating baseline file.
+ # GdnBreakOutputBaselineFile: python
+ # GdnBreakOutputBaseline: baseline
+ continueOnError: true
+
+ - pwsh: |
+ azcopy copy "https://azuresdkartifacts.blob.core.windows.net/policheck/PythonPoliCheckExclusion.mdb?$(azuresdk-policheck-blob-SAS)" `
+ "$(Build.BinariesDirectory)"
+ displayName: 'Download PoliCheck Exclusion Database'
+ condition: succeededOrFailed()
+
+ - task: securedevelopmentteam.vss-secure-development-tools.build-task-policheck.PoliCheck@2
+ displayName: 'Run PoliCheck'
+ inputs:
+ targetType: F
+ targetArgument: '$(Build.SourcesDirectory)'
+ result: PoliCheck.sarif
+ optionsFC: 0
+ optionsXS: 1
+ optionsPE: 1|2|3|4
+ optionsRulesDBPath: "$(Build.BinariesDirectory)/PythonPoliCheckExclusion.mdb"
+ optionsUEPATH: "$(Build.SourcesDirectory)/eng/guardian-tools/policheck/PolicheckExclusions.xml"
+ condition: succeededOrFailed()
+
+ - task: securedevelopmentteam.vss-secure-development-tools.build-task-postanalysis.PostAnalysis@2
+ displayName: 'Post Analysis (PoliCheck)'
+ inputs:
+ GdnBreakAllTools: false
+ GdnBreakGdnToolPoliCheck: true
+ GdnBreakGdnToolPoliCheckSeverity: Warning
+ condition: succeededOrFailed()
+ continueOnError: true
+
+ - task: securedevelopmentteam.vss-secure-development-tools.build-task-publishsecurityanalysislogs.PublishSecurityAnalysisLogs@3
+ displayName: 'Publish Security Analysis Logs'
+ continueOnError: true
+ condition: succeededOrFailed()
diff --git a/scripts/devops_tasks/test_run_samples.py b/scripts/devops_tasks/test_run_samples.py
index 2dae08c2f310..cc9a908fa543 100644
--- a/scripts/devops_tasks/test_run_samples.py
+++ b/scripts/devops_tasks/test_run_samples.py
@@ -125,6 +125,13 @@
"sample_list_translations_with_filters.py",
"sample_translation_with_custom_model.py",
"sample_translation_with_custom_model_async.py",
+ ],
+ "azure-ai-formrecognizer": [
+ "sample_manage_custom_models.py",
+ "sample_manage_custom_models_async.py",
+ ],
+ "azure-ai-language-questionanswering": [
+ "sample_chat.py"
]
}
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/CHANGELOG.md b/sdk/cognitivelanguage/azure-ai-language-questionanswering/CHANGELOG.md
index 8df38ca5c8f8..15558180c1de 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/CHANGELOG.md
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/CHANGELOG.md
@@ -5,10 +5,20 @@
* We are now targeting service version `2021-07-15-preview`
### Breaking changes
+
* The method `QuestionAnsweringClient.query_knowledgebase` has been renamed to `query_knowledge_base`.
+* Options bag model `KnowledgeBaseQueryOptions` for `query_knowledge_base` is renamed to `QueryKnowledgeBaseOptions`
+* Options bag model `TextQueryOptions` for `query_text` is renamed to `QueryTextOptions`
+* The filters model `StrictFilters` is renamed to `QueryFilters`
+* Enum `CompoundOperationKind` is renamed to `LogicalOperationKind`
+* We have removed the `string_index_type` input to all models and operations. We have also removed the `StringIndexType` enum.
+* The type of input `metadata` to `MetadataFilter` has changed from a dictionary of strings to a list of key-value tuples. For example, the input has changed from `{"key": "value"}` to `[("key", "value")]`.
+* The input to the `query_knowledge_base` and `query_text` overloads that take in a positional model for the body should be considered positional only.
### Features Added
+
* The method `QuestionAnsweringClient.query_text` now supports a list of records as strings, where the ID value will be automatically populated.
+* Added keyword argument `default_language` onto `QuestionAnsweringClient`, which has default value `'en'`. The default language for any operation call will be this default language value.
## 1.0.0b1 (2021-07-27)
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md b/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md
index 06fb24064963..2e3979bc06b8 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md
@@ -6,13 +6,17 @@ Question Answering is a cloud-based API service that lets you create a conversat
[Source code][questionanswering_client_src] | [Package (PyPI)][questionanswering_pypi_package] | [API reference documentation][questionanswering_refdocs] | [Product documentation][questionanswering_docs] | [Samples][questionanswering_samples]
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Prerequisites
-* Python 2.7, or 3.6 or later is required to use this package.
-* An [Azure subscription][azure_subscription]
-* An existing Question Answering resource
+- Python 2.7, or 3.6 or later is required to use this package.
+- An [Azure subscription][azure_subscription]
+- An existing Question Answering resource
> Note: the new unified Cognitive Language Services are not currently available for deployment.
@@ -56,7 +60,7 @@ client = QuestionAnsweringClient(endpoint, credential)
### QuestionAnsweringClient
-The [QuestionAnsweringClient][questionanswering_client_class] is the primary interface for asking questions using a knowledge base with your own information, or text input using pre-trained models.
+The [QuestionAnsweringClient][questionanswering_client_class] is the primary interface for asking questions using a knowledge base with your own information, or text input using pre-trained models.
For asynchronous operations, an async `QuestionAnsweringClient` is in the `azure.ai.language.questionanswering.aio` namespace.
## Examples
@@ -64,6 +68,7 @@ For asynchronous operations, an async `QuestionAnsweringClient` is in the `azure
The `azure-ai-language-questionanswering` client library provides both synchronous and asynchronous APIs.
The following examples show common scenarios using the `client` [created above](#create-questionansweringclient).
+
- [Ask a question](#ask-a-question)
- [Ask a follow-up question](#ask-a-follow-up-question)
- [Asynchronous operations](#asynchronous-operations)
@@ -75,7 +80,7 @@ The only input required to ask a question using a knowledge base is just the que
```python
from azure.ai.language.questionanswering import models as qna
-params = qna.KnowledgeBaseQueryOptions(
+params = qna.QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?"
)
@@ -89,14 +94,14 @@ for candidate in output.answers:
```
-You can set additional properties on `KnowledgeBaseQueryOptions` to limit the number of answers, specify a minimum confidence score, and more.
+You can set additional properties on `QueryKnowledgeBaseOptions` to limit the number of answers, specify a minimum confidence score, and more.
### Ask a follow-up question
If your knowledge base is configured for [chit-chat][questionanswering_docs_chat], the answers from the knowledge base may include suggested [prompts for follow-up questions][questionanswering_refdocs_prompts] to initiate a conversation. You can ask a follow-up question by providing the ID of your chosen answer as the context for the continued conversation:
```python
-params = qna.models.KnowledgeBaseQueryOptions(
+params = qna.models.QueryKnowledgeBaseOptions(
question="How long should charging take?"
context=qna.models.KnowledgeBaseAnswerRequestContext(
previous_qna_id=previous_answer.id
@@ -112,9 +117,11 @@ for candidate in output.answers:
print("Source: {}".format(candidate.source))
```
+
### Asynchronous operations
The above examples can also be run asynchronously using the client in the `aio` namespace:
+
```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
@@ -122,7 +129,7 @@ from azure.ai.language.questionanswering import models as qna
client = QuestionAnsweringClient(endpoint, credential)
-params = qna.KnowledgeBaseQueryOptions(
+params = qna.QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?"
)
@@ -133,11 +140,13 @@ output = await client.query_knowledgebase(
```
## Optional Configuration
+
Optional keyword arguments can be passed in at the client and per-operation level. The azure-core [reference documentation][azure_core_ref_docs] describes available configurations for retries, logging, transport protocols, and more.
## Troubleshooting
### General
+
Azure QuestionAnswering clients raise exceptions defined in [Azure Core][azure_core_readme].
When you interact with the Cognitive Language Services Question Answering client library using the Python SDK, errors returned by the service correspond to the same HTTP status codes returned for [REST API][questionanswering_rest_docs] requests.
@@ -156,6 +165,7 @@ except HttpResponseError as error:
```
### Logging
+
This library uses the standard
[logging][python_logging] library for logging.
Basic information about HTTP sessions (URLs, headers, etc.) is logged at INFO
@@ -168,9 +178,9 @@ See full SDK logging documentation with examples [here][sdk_logging_docs].
## Next steps
-* View our [samples][questionanswering_samples].
-* Read about the different [features][questionanswering_docs_features] of the Question Answering service.
-* Try our service [demos][questionanswering_docs_demos].
+- View our [samples][questionanswering_samples].
+- Read about the different [features][questionanswering_docs_features] of the Question Answering service.
+- Try our service [demos][questionanswering_docs_demos].
## Contributing
@@ -183,6 +193,7 @@ When you submit a pull request, a CLA-bot will automatically determine whether y
This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments.
+
[azure_cli]: https://docs.microsoft.com/cli/azure/
[azure_portal]: https://portal.azure.com/
[azure_subscription]: https://azure.microsoft.com/free/
@@ -196,7 +207,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con
[sdk_logging_docs]: https://docs.microsoft.com/azure/developer/python/azure-sdk-logging
[azure_core_ref_docs]: https://azuresdkdocs.blob.core.windows.net/$web/python/azure-core/latest/azure.core.html
[azure_core_readme]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/core/azure-core/README.md
-[pip_link]:https://pypi.org/project/pip/
+[pip_link]: https://pypi.org/project/pip/
[questionanswering_client_class]: https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0b1/azure.ai.language.questionanswering.html#azure.ai.language.questionanswering.QuestionAnsweringClient
[questionanswering_refdocs_prompts]: https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0b1/azure.ai.language.questionanswering.models.html#azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
[questionanswering_client_src]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-questionanswering/
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_patch.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_patch.py
index 7308492dcdab..44a5cd3f4d5c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_patch.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_patch.py
@@ -36,3 +36,29 @@ def _validate_text_records(records):
else:
request_batch.append(doc)
return request_batch
+
+def _get_positional_body(*args, **kwargs):
+ """Verify args and kwargs are valid, and then return the positional body, if users passed it in."""
+ if len(args) > 1:
+ raise TypeError("There can only be one positional argument, which is the POST body of this request.")
+ if args and "options" in kwargs:
+ raise TypeError(
+ "You have already supplied the request body as a positional parameter, "
+ "you can not supply it as a keyword argument as well."
+ )
+ return args[0] if args else None
+
+def _verify_qna_id_and_question(query_knowledgebase_options):
+ """For query_knowledge_base we require either `question` or `qna_id`."""
+ try:
+ qna_id = query_knowledgebase_options.qna_id
+ question = query_knowledgebase_options.question
+ except AttributeError:
+ qna_id = query_knowledgebase_options.get("qna_id") or query_knowledgebase_options.get("qnaId")
+ question = query_knowledgebase_options.get("question")
+ if not (qna_id or question):
+ raise TypeError(
+ "You need to pass in either `qna_id` or `question`."
+ )
+ if qna_id and question:
+ raise TypeError("You can not specify both `qna_id` and `question`.")
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py
index 9de9e08df077..96c7eb2e28fc 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py
@@ -32,6 +32,7 @@ class QuestionAnsweringClient(QuestionAnsweringClientOperationsMixin):
:type endpoint: str
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
+ :keyword str default_language: Sets the default language to use for all operations.
"""
def __init__(
@@ -49,6 +50,7 @@ def __init__(
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
+ self._default_language = kwargs.pop("default_language", None)
def send_request(
self,
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py
index 6994f258008f..a506cabb9960 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py
@@ -27,6 +27,7 @@ class QuestionAnsweringClient(QuestionAnsweringClientOperationsMixin):
:type endpoint: str
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
+ :keyword str default_language: Sets the default language to use for all operations.
"""
def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None:
@@ -38,6 +39,7 @@ def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any)
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
+ self._default_language = kwargs.pop("default_language", None)
def send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_operations.py
index 6775164a54ba..0c6cde294709 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_operations.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_operations.py
@@ -22,7 +22,7 @@
from ... import models as _models
from ...operations._operations import build_query_knowledge_base_request, build_query_text_request
-from ..._patch import _validate_text_records
+from ..._patch import _validate_text_records, _get_positional_body, _verify_qna_id_and_question
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -32,23 +32,22 @@ class QuestionAnsweringClientOperationsMixin:
@overload
async def query_knowledge_base(
self,
- knowledge_base_query_options: "_models.KnowledgeBaseQueryOptions",
+ options: "_models.QueryKnowledgeBaseOptions",
*,
project_name: str,
- deployment_name: Optional[str] = None,
+ deployment_name: str,
**kwargs: Any
) -> "_models.KnowledgeBaseAnswers":
"""Answers the specified question using your knowledge base.
- :param knowledge_base_query_options: Post body of the request.
- :type knowledge_base_query_options:
- ~azure.ai.language.questionanswering.models.KnowledgeBaseQueryOptions
+ :param options: Positional-only POST body of the request.
+ :type options:
+ ~azure.ai.language.questionanswering.models.QueryKnowledgeBaseOptions
:keyword project_name: The name of the project to use.
:paramtype project_name: str
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgeBaseAnswers, or the result of cls(response)
+ :return: KnowledgeBaseAnswers
:rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -59,7 +58,7 @@ async def query_knowledge_base(
self,
*,
project_name: str,
- deployment_name: Optional[str] = None,
+ deployment_name: str,
qna_id: Optional[int] = None,
question: Optional[str] = None,
top: Optional[int] = None,
@@ -67,7 +66,7 @@ async def query_knowledge_base(
confidence_score_threshold: Optional[float] = None,
context: Optional["_models.KnowledgeBaseAnswerRequestContext"] = None,
ranker_type: Optional[Union[str, "_models.RankerType"]] = None,
- strict_filters: Optional["_models.StrictFilters"] = None,
+ filters: Optional["_models.QueryFilters"] = None,
answer_span_request: Optional["_models.AnswerSpanRequest"] = None,
include_unstructured_sources: Optional[bool] = None,
**kwargs: Any
@@ -95,14 +94,12 @@ async def query_knowledge_base(
:keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
values include: "Default", "QuestionOnly".
:paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :keyword strict_filters: Filter QnAs based on give metadata list and knowledge base source names.
- :paramtype strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
+        :keyword filters: Filter QnAs based on given metadata list and knowledge base source names.
+ :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
:keyword answer_span_request: To configure Answer span prediction feature.
:paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
:keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources.
:paramtype include_unstructured_sources: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgeBaseAnswers, or the result of cls(response)
:rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -115,17 +112,17 @@ async def query_knowledge_base(
) -> "_models.KnowledgeBaseAnswers":
"""Answers the specified question using your knowledge base.
- :param knowledge_base_query_options: Post body of the request. Provide either `knowledge_base_query_options`, OR
+ :param options: POST body of the request. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
- :type knowledge_base_query_options:
- ~azure.ai.language.questionanswering.models.KnowledgeBaseQueryOptions
+ :type options:
+ ~azure.ai.language.questionanswering.models.QueryKnowledgeBaseOptions
:keyword project_name: The name of the project to use.
:paramtype project_name: str
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
:keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over question.
:paramtype qna_id: int
- :keyword question: User question to query against the knowledge base. Provide either `knowledge_base_query_options`, OR
+ :keyword question: User question to query against the knowledge base. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:paramtype question: str
:keyword top: Max number of answers to be returned for the question.
@@ -139,40 +136,37 @@ async def query_knowledge_base(
:keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
values include: "Default", "QuestionOnly".
:paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :keyword strict_filters: Filter QnAs based on give metadata list and knowledge base source names.
- :paramtype strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
+        :keyword filters: Filter QnAs based on given metadata list and knowledge base source names.
+ :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
:keyword answer_span_request: To configure Answer span prediction feature.
:paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
:keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources.
:paramtype include_unstructured_sources: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgeBaseAnswers, or the result of cls(response)
+ :return: KnowledgeBaseAnswers
:rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
- if args:
- knowledge_base_query_options = args[0]
- else:
- knowledge_base_query_options = _models.KnowledgeBaseQueryOptions(
- qna_id=kwargs.pop("qna_id", None),
- question=kwargs.pop("question", None),
- top=kwargs.pop("top", None),
- user_id=kwargs.pop("user_id", None),
- confidence_score_threshold=kwargs.pop("confidence_score_threshold", None),
- context=kwargs.pop("context", None),
- ranker_type=kwargs.pop("ranker_type", None),
- strict_filters=kwargs.pop("strict_filters", None),
- answer_span_request=kwargs.pop("answer_span_request", None),
- include_unstructured_sources=kwargs.pop("include_unstructured_sources", None)
- )
+ options = _get_positional_body(*args, **kwargs) or _models.QueryKnowledgeBaseOptions(
+ qna_id=kwargs.pop("qna_id", None),
+ question=kwargs.pop("question", None),
+ top=kwargs.pop("top", None),
+ user_id=kwargs.pop("user_id", None),
+ confidence_score_threshold=kwargs.pop("confidence_score_threshold", None),
+ context=kwargs.pop("context", None),
+ ranker_type=kwargs.pop("ranker_type", None),
+ filters=kwargs.pop("filters", None),
+ answer_span_request=kwargs.pop("answer_span_request", None),
+ include_unstructured_sources=kwargs.pop("include_unstructured_sources", None)
+ )
+ _verify_qna_id_and_question(options)
cls = kwargs.pop("cls", None) # type: ClsType["_models.KnowledgeBaseAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
project_name = kwargs.pop("project_name") # type: str
- deployment_name = kwargs.pop("deployment_name", None) # type: Optional[str]
+ deployment_name = kwargs.pop("deployment_name") # type: str
- json = self._serialize.body(knowledge_base_query_options, "KnowledgeBaseQueryOptions")
+ json = self._serialize.body(options, "QueryKnowledgeBaseOptions")
request = build_query_knowledge_base_request(
content_type=content_type,
@@ -186,9 +180,7 @@ async def query_knowledge_base(
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(
- request, stream=False, _return_pipeline_response=True, **kwargs
- )
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
@@ -206,15 +198,12 @@ async def query_knowledge_base(
query_knowledge_base.metadata = {"url": "/:query-knowledgebases"} # type: ignore
@overload
- async def query_text(
- self, text_query_options: "_models.TextQueryOptions", **kwargs: Any
- ) -> "_models.TextAnswers":
+ async def query_text(self, options: "_models.QueryTextOptions", **kwargs: Any) -> "_models.TextAnswers":
"""Answers the specified question using the provided text in the body.
- :param text_query_options: Post body of the request.
- :type text_query_options: ~azure.ai.language.questionanswering.models.TextQueryOptions
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: TextAnswers, or the result of cls(response)
+ :param options: Positional-only POST body of the request.
+ :type options: ~azure.ai.language.questionanswering.models.QueryTextOptions
+ :return: TextAnswers
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -227,7 +216,6 @@ async def query_text(
question: str,
records: List["_models.TextRecord"],
language: Optional[str] = None,
- string_index_type: Optional[Union[str, "_models.StringIndexType"]] = "TextElements_v8",
**kwargs: Any
) -> "_models.TextAnswers":
"""Answers the specified question using the provided text in the body.
@@ -240,13 +228,7 @@ async def query_text(
example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
default.
:paramtype language: str
- :keyword string_index_type: Specifies the method used to interpret string offsets. Defaults to
- Text Elements (Graphemes) according to Unicode v8.0.0. For additional information see
- https://aka.ms/text-analytics-offsets. Possible values include: "TextElements_v8",
- "UnicodeCodePoint", "Utf16CodeUnit". Default value: "TextElements_v8".
- :paramtype string_index_type: str or ~azure.ai.language.questionanswering.models.StringIndexType
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: TextAnswers, or the result of cls(response)
+ :return: TextAnswers
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -257,48 +239,38 @@ async def query_text(
) -> "_models.TextAnswers":
"""Answers the specified question using the provided text in the body.
- :param text_query_options: Post body of the request. Provide either `text_query_options`, OR
+ :param options: POST body of the request. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
- :type text_query_options: ~azure.ai.language.questionanswering.models.TextQueryOptions
- :keyword question: User question to query against the given text records. Provide either `text_query_options`, OR
+ :type options: ~azure.ai.language.questionanswering.models.QueryTextOptions
+ :keyword question: User question to query against the given text records. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:paramtype question: str
- :keyword records: Text records to be searched for given question. Provide either `text_query_options`, OR
+ :keyword records: Text records to be searched for given question. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:paramtype records: list[~azure.ai.language.questionanswering.models.TextRecord]
:keyword language: Language of the text records. This is BCP-47 representation of a language. For
example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as default.
:paramtype language: str
- :keyword string_index_type: Specifies the method used to interpret string offsets. Defaults to
- Text Elements (Graphemes) according to Unicode v8.0.0. For additional information see
- https://aka.ms/text-analytics-offsets. Possible values include: "TextElements_v8",
- "UnicodeCodePoint", "Utf16CodeUnit". Default value: "TextElements_v8".
- :paramtype string_index_type: str or ~azure.ai.language.questionanswering.models.StringIndexType
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: TextAnswers, or the result of cls(response)
+ :return: TextAnswers
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
- if args:
- text_query_options = args[0]
- else:
- text_query_options = _models.TextQueryOptions(
- question=kwargs.pop("question"),
- records=kwargs.pop("records"),
- language=kwargs.pop("language", None),
- string_index_type=kwargs.pop("string_index_type", "TextElements_v8")
- )
+ options = _get_positional_body(*args, **kwargs) or _models.QueryTextOptions(
+ question=kwargs.pop("question"),
+ records=kwargs.pop("records"),
+ language=kwargs.pop("language", self._default_language),
+ )
try:
- text_query_options['records'] = _validate_text_records(text_query_options['records'])
+ options['records'] = _validate_text_records(options['records'])
except TypeError:
- text_query_options.records = _validate_text_records(text_query_options.records)
+ options.records = _validate_text_records(options.records)
cls = kwargs.pop("cls", None) # type: ClsType["_models.TextAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
- json = self._serialize.body(text_query_options, "TextQueryOptions")
+ json = self._serialize.body(options, "QueryTextOptions")
request = build_query_text_request(
content_type=content_type,
@@ -310,9 +282,7 @@ async def query_text(
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(
- request, stream=False, _return_pipeline_response=True, **kwargs
- )
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py
index 55d25e6fff0c..e7f241830c2f 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py
@@ -17,12 +17,12 @@
from ._models_py3 import KnowledgeBaseAnswerPrompt
from ._models_py3 import KnowledgeBaseAnswerRequestContext
from ._models_py3 import KnowledgeBaseAnswers
- from ._models_py3 import KnowledgeBaseQueryOptions
from ._models_py3 import MetadataFilter
- from ._models_py3 import StrictFilters
+ from ._models_py3 import QueryFilters
+ from ._models_py3 import QueryKnowledgeBaseOptions
+ from ._models_py3 import QueryTextOptions
from ._models_py3 import TextAnswer
from ._models_py3 import TextAnswers
- from ._models_py3 import TextQueryOptions
from ._models_py3 import TextRecord
except (SyntaxError, ImportError):
from ._models import AnswerSpan # type: ignore
@@ -35,20 +35,19 @@
from ._models import KnowledgeBaseAnswerPrompt # type: ignore
from ._models import KnowledgeBaseAnswerRequestContext # type: ignore
from ._models import KnowledgeBaseAnswers # type: ignore
- from ._models import KnowledgeBaseQueryOptions # type: ignore
from ._models import MetadataFilter # type: ignore
- from ._models import StrictFilters # type: ignore
+ from ._models import QueryFilters # type: ignore
+ from ._models import QueryKnowledgeBaseOptions # type: ignore
+ from ._models import QueryTextOptions # type: ignore
from ._models import TextAnswer # type: ignore
from ._models import TextAnswers # type: ignore
- from ._models import TextQueryOptions # type: ignore
from ._models import TextRecord # type: ignore
from ._question_answering_client_enums import (
- CompoundOperationKind,
ErrorCode,
InnerErrorCode,
+ LogicalOperationKind,
RankerType,
- StringIndexType,
)
__all__ = [
@@ -62,16 +61,15 @@
"KnowledgeBaseAnswerPrompt",
"KnowledgeBaseAnswerRequestContext",
"KnowledgeBaseAnswers",
- "KnowledgeBaseQueryOptions",
"MetadataFilter",
- "StrictFilters",
+ "QueryFilters",
+ "QueryKnowledgeBaseOptions",
+ "QueryTextOptions",
"TextAnswer",
"TextAnswers",
- "TextQueryOptions",
"TextRecord",
- "CompoundOperationKind",
"ErrorCode",
"InnerErrorCode",
+ "LogicalOperationKind",
"RankerType",
- "StringIndexType",
]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py
index e16ad163e9fe..0946d3315693 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py
@@ -13,14 +13,14 @@
class AnswerSpan(msrest.serialization.Model):
"""Answer span object of QnA.
- :keyword text: Predicted text of answer span.
- :paramtype text: str
- :keyword confidence_score: Predicted score of answer span, value ranges from 0 to 1.
- :paramtype confidence_score: float
- :keyword offset: The answer span offset from the start of answer.
- :paramtype offset: int
- :keyword length: The length of the answer span.
- :paramtype length: int
+ :ivar text: Predicted text of answer span.
+ :vartype text: str
+ :ivar confidence_score: Predicted score of answer span, value ranges from 0 to 1.
+ :vartype confidence_score: float
+ :ivar offset: The answer span offset from the start of answer.
+ :vartype offset: int
+ :ivar length: The length of the answer span.
+ :vartype length: int
"""
_validation = {
@@ -35,6 +35,16 @@ class AnswerSpan(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword text: Predicted text of answer span.
+ :paramtype text: str
+ :keyword confidence_score: Predicted score of answer span, value ranges from 0 to 1.
+ :paramtype confidence_score: float
+ :keyword offset: The answer span offset from the start of answer.
+ :paramtype offset: int
+ :keyword length: The length of the answer span.
+ :paramtype length: int
+ """
super(AnswerSpan, self).__init__(**kwargs)
self.text = kwargs.get("text", None)
self.confidence_score = kwargs.get("confidence_score", None)
@@ -45,14 +55,14 @@ def __init__(self, **kwargs):
class AnswerSpanRequest(msrest.serialization.Model):
"""To configure Answer span prediction feature.
- :keyword enable: Enable or disable Answer Span prediction.
- :paramtype enable: bool
- :keyword confidence_score_threshold: Minimum threshold score required to include an answer
- span, value ranges from 0 to 1.
- :paramtype confidence_score_threshold: float
- :keyword top_answers_with_span: Number of Top answers to be considered for span prediction from
- 1 to 10.
- :paramtype top_answers_with_span: int
+ :ivar enable: Enable or disable Answer Span prediction.
+ :vartype enable: bool
+ :ivar confidence_score_threshold: Minimum threshold score required to include an answer span,
+ value ranges from 0 to 1.
+ :vartype confidence_score_threshold: float
+ :ivar top_answers_with_span: Number of Top answers to be considered for span prediction from 1
+ to 10.
+ :vartype top_answers_with_span: int
"""
_validation = {
@@ -67,6 +77,16 @@ class AnswerSpanRequest(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword enable: Enable or disable Answer Span prediction.
+ :paramtype enable: bool
+ :keyword confidence_score_threshold: Minimum threshold score required to include an answer
+ span, value ranges from 0 to 1.
+ :paramtype confidence_score_threshold: float
+ :keyword top_answers_with_span: Number of Top answers to be considered for span prediction from
+ 1 to 10.
+ :paramtype top_answers_with_span: int
+ """
super(AnswerSpanRequest, self).__init__(**kwargs)
self.enable = kwargs.get("enable", None)
self.confidence_score_threshold = kwargs.get("confidence_score_threshold", None)
@@ -78,19 +98,19 @@ class Error(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ :ivar code: Required. One of a server-defined set of error codes. Possible values include:
"InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound",
"TooManyRequests", "InternalServerError", "ServiceUnavailable".
- :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode
- :keyword message: Required. A human-readable representation of the error.
- :paramtype message: str
- :keyword target: The target of the error.
- :paramtype target: str
- :keyword details: An array of details about specific errors that led to this reported error.
- :paramtype details: list[~azure.ai.language.questionanswering.models.Error]
- :keyword innererror: An object containing more specific information than the current object
- about the error.
- :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ :vartype code: str or ~azure.ai.language.questionanswering.models.ErrorCode
+ :ivar message: Required. A human-readable representation of the error.
+ :vartype message: str
+ :ivar target: The target of the error.
+ :vartype target: str
+ :ivar details: An array of details about specific errors that led to this reported error.
+ :vartype details: list[~azure.ai.language.questionanswering.models.Error]
+ :ivar innererror: An object containing more specific information than the current object about
+ the error.
+ :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
"""
_validation = {
@@ -107,6 +127,21 @@ class Error(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound",
+ "TooManyRequests", "InternalServerError", "ServiceUnavailable".
+ :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode
+ :keyword message: Required. A human-readable representation of the error.
+ :paramtype message: str
+ :keyword target: The target of the error.
+ :paramtype target: str
+ :keyword details: An array of details about specific errors that led to this reported error.
+ :paramtype details: list[~azure.ai.language.questionanswering.models.Error]
+ :keyword innererror: An object containing more specific information than the current object
+ about the error.
+ :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ """
super(Error, self).__init__(**kwargs)
self.code = kwargs["code"]
self.message = kwargs["message"]
@@ -118,8 +153,8 @@ def __init__(self, **kwargs):
class ErrorResponse(msrest.serialization.Model):
"""Error response.
- :keyword error: The error object.
- :paramtype error: ~azure.ai.language.questionanswering.models.Error
+ :ivar error: The error object.
+ :vartype error: ~azure.ai.language.questionanswering.models.Error
"""
_attribute_map = {
@@ -127,6 +162,10 @@ class ErrorResponse(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword error: The error object.
+ :paramtype error: ~azure.ai.language.questionanswering.models.Error
+ """
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get("error", None)
@@ -136,19 +175,19 @@ class InnerErrorModel(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ :ivar code: Required. One of a server-defined set of error codes. Possible values include:
"InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound",
"AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure".
- :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode
- :keyword message: Required. Error message.
- :paramtype message: str
- :keyword details: Error details.
- :paramtype details: dict[str, str]
- :keyword target: Error target.
- :paramtype target: str
- :keyword innererror: An object containing more specific information than the current object
- about the error.
- :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ :vartype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode
+ :ivar message: Required. Error message.
+ :vartype message: str
+ :ivar details: Error details.
+ :vartype details: dict[str, str]
+ :ivar target: Error target.
+ :vartype target: str
+ :ivar innererror: An object containing more specific information than the current object about
+ the error.
+ :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
"""
_validation = {
@@ -165,6 +204,21 @@ class InnerErrorModel(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound",
+ "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure".
+ :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode
+ :keyword message: Required. Error message.
+ :paramtype message: str
+ :keyword details: Error details.
+ :paramtype details: dict[str, str]
+ :keyword target: Error target.
+ :paramtype target: str
+ :keyword innererror: An object containing more specific information than the current object
+ about the error.
+ :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ """
super(InnerErrorModel, self).__init__(**kwargs)
self.code = kwargs["code"]
self.message = kwargs["message"]
@@ -176,23 +230,23 @@ def __init__(self, **kwargs):
class KnowledgeBaseAnswer(msrest.serialization.Model):
"""Represents knowledge base answer.
- :keyword questions: List of questions.
- :paramtype questions: list[str]
- :keyword answer: The Answer.
- :paramtype answer: str
- :keyword confidence_score: Answer confidence score, value ranges from 0 to 1.
- :paramtype confidence_score: float
- :keyword id: ID of the QnA result.
- :paramtype id: int
- :keyword source: Source of QnA result.
- :paramtype source: str
- :keyword metadata: Metadata associated with the answer, useful to categorize or filter question
+ :ivar questions: List of questions.
+ :vartype questions: list[str]
+ :ivar answer: The Answer.
+ :vartype answer: str
+ :ivar confidence_score: Answer confidence score, value ranges from 0 to 1.
+ :vartype confidence_score: float
+ :ivar id: ID of the QnA result.
+ :vartype id: int
+ :ivar source: Source of QnA result.
+ :vartype source: str
+ :ivar metadata: Metadata associated with the answer, useful to categorize or filter question
answers.
- :paramtype metadata: dict[str, str]
- :keyword dialog: Dialog associated with Answer.
- :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
- :keyword answer_span: Answer span object of QnA with respect to user's question.
- :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ :vartype metadata: dict[str, str]
+ :ivar dialog: Dialog associated with Answer.
+ :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
+ :ivar answer_span: Answer span object of QnA with respect to user's question.
+ :vartype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
"""
_validation = {
@@ -211,6 +265,25 @@ class KnowledgeBaseAnswer(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword questions: List of questions.
+ :paramtype questions: list[str]
+ :keyword answer: The Answer.
+ :paramtype answer: str
+ :keyword confidence_score: Answer confidence score, value ranges from 0 to 1.
+ :paramtype confidence_score: float
+ :keyword id: ID of the QnA result.
+ :paramtype id: int
+ :keyword source: Source of QnA result.
+ :paramtype source: str
+ :keyword metadata: Metadata associated with the answer, useful to categorize or filter question
+ answers.
+ :paramtype metadata: dict[str, str]
+ :keyword dialog: Dialog associated with Answer.
+ :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
+ :keyword answer_span: Answer span object of QnA with respect to user's question.
+ :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ """
super(KnowledgeBaseAnswer, self).__init__(**kwargs)
self.questions = kwargs.get("questions", None)
self.answer = kwargs.get("answer", None)
@@ -225,12 +298,12 @@ def __init__(self, **kwargs):
class KnowledgeBaseAnswerDialog(msrest.serialization.Model):
"""Dialog associated with Answer.
- :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.
- If true, do not include this QnA as search result for queries without context; otherwise, if
+ :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If
+ true, do not include this QnA as search result for queries without context; otherwise, if
false, ignores context and includes this QnA in search result.
- :paramtype is_context_only: bool
- :keyword prompts: List of 0 to 20 prompts associated with the answer.
- :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
+ :vartype is_context_only: bool
+ :ivar prompts: List of 0 to 20 prompts associated with the answer.
+ :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
"""
_validation = {
@@ -243,6 +316,14 @@ class KnowledgeBaseAnswerDialog(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.
+ If true, do not include this QnA as search result for queries without context; otherwise, if
+ false, ignores context and includes this QnA in search result.
+ :paramtype is_context_only: bool
+ :keyword prompts: List of 0 to 20 prompts associated with the answer.
+ :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
+ """
super(KnowledgeBaseAnswerDialog, self).__init__(**kwargs)
self.is_context_only = kwargs.get("is_context_only", None)
self.prompts = kwargs.get("prompts", None)
@@ -251,12 +332,12 @@ def __init__(self, **kwargs):
class KnowledgeBaseAnswerPrompt(msrest.serialization.Model):
"""Prompt for an answer.
- :keyword display_order: Index of the prompt - used in ordering of the prompts.
- :paramtype display_order: int
- :keyword qna_id: QnA ID corresponding to the prompt.
- :paramtype qna_id: int
- :keyword display_text: Text displayed to represent a follow up question prompt.
- :paramtype display_text: str
+ :ivar display_order: Index of the prompt - used in ordering of the prompts.
+ :vartype display_order: int
+ :ivar qna_id: QnA ID corresponding to the prompt.
+ :vartype qna_id: int
+ :ivar display_text: Text displayed to represent a follow up question prompt.
+ :vartype display_text: str
"""
_validation = {
@@ -270,6 +351,14 @@ class KnowledgeBaseAnswerPrompt(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword display_order: Index of the prompt - used in ordering of the prompts.
+ :paramtype display_order: int
+ :keyword qna_id: QnA ID corresponding to the prompt.
+ :paramtype qna_id: int
+ :keyword display_text: Text displayed to represent a follow up question prompt.
+ :paramtype display_text: str
+ """
super(KnowledgeBaseAnswerPrompt, self).__init__(**kwargs)
self.display_order = kwargs.get("display_order", None)
self.qna_id = kwargs.get("qna_id", None)
@@ -281,10 +370,10 @@ class KnowledgeBaseAnswerRequestContext(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword previous_qna_id: Required. Previous turn top answer result QnA ID.
- :paramtype previous_qna_id: int
- :keyword previous_user_query: Previous user query.
- :paramtype previous_user_query: str
+ :ivar previous_qna_id: Required. Previous turn top answer result QnA ID.
+ :vartype previous_qna_id: int
+ :ivar previous_user_query: Previous user query.
+ :vartype previous_user_query: str
"""
_validation = {
@@ -297,6 +386,12 @@ class KnowledgeBaseAnswerRequestContext(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword previous_qna_id: Required. Previous turn top answer result QnA ID.
+ :paramtype previous_qna_id: int
+ :keyword previous_user_query: Previous user query.
+ :paramtype previous_user_query: str
+ """
super(KnowledgeBaseAnswerRequestContext, self).__init__(**kwargs)
self.previous_qna_id = kwargs["previous_qna_id"]
self.previous_user_query = kwargs.get("previous_user_query", None)
@@ -305,8 +400,8 @@ def __init__(self, **kwargs):
class KnowledgeBaseAnswers(msrest.serialization.Model):
"""Represents List of Question Answers.
- :keyword answers: Represents Answer Result list.
- :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
+ :ivar answers: Represents Answer Result list.
+ :vartype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
"""
_attribute_map = {
@@ -314,39 +409,108 @@ class KnowledgeBaseAnswers(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword answers: Represents Answer Result list.
+ :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
+ """
super(KnowledgeBaseAnswers, self).__init__(**kwargs)
self.answers = kwargs.get("answers", None)
-class KnowledgeBaseQueryOptions(msrest.serialization.Model):
+class MetadataFilter(msrest.serialization.Model):
+ """Find QnAs that are associated with the given list of metadata.
+
+ :ivar metadata:
+ :vartype metadata: list[tuple[str]]
+ :ivar logical_operation: Operation used to join metadata filters. Possible values include:
+ "AND", "OR".
+ :vartype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+
+ _attribute_map = {
+ "metadata": {"key": "metadata", "type": "[[str]]"},
+ "logical_operation": {"key": "logicalOperation", "type": "str"},
+ }
+
+ def __init__(self, **kwargs):
+ """
+ :keyword metadata:
+ :paramtype metadata: list[tuple[str]]
+ :keyword logical_operation: Operation used to join metadata filters. Possible values include:
+ "AND", "OR".
+ :paramtype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+ super(MetadataFilter, self).__init__(**kwargs)
+ self.metadata = kwargs.get("metadata", None)
+ self.logical_operation = kwargs.get("logical_operation", None)
+
+
+class QueryFilters(msrest.serialization.Model):
+ """filters over knowledge base.
+
+ :ivar metadata_filter: Find QnAs that are associated with the given list of metadata.
+ :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
+ :ivar source_filter: Find QnAs that are associated with the given list of sources in knowledge
+ base.
+ :vartype source_filter: list[str]
+ :ivar logical_operation: Logical operation used to join metadata filters with source filters.
+ Possible values include: "AND", "OR".
+ :vartype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+
+ _attribute_map = {
+ "metadata_filter": {"key": "metadataFilter", "type": "MetadataFilter"},
+ "source_filter": {"key": "sourceFilter", "type": "[str]"},
+ "logical_operation": {"key": "logicalOperation", "type": "str"},
+ }
+
+ def __init__(self, **kwargs):
+ """
+ :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.
+ :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
+ :keyword source_filter: Find QnAs that are associated with the given list of sources in
+ knowledge base.
+ :paramtype source_filter: list[str]
+ :keyword logical_operation: Logical operation used to join metadata filters with source
+ filters. Possible values include: "AND", "OR".
+ :paramtype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+ super(QueryFilters, self).__init__(**kwargs)
+ self.metadata_filter = kwargs.get("metadata_filter", None)
+ self.source_filter = kwargs.get("source_filter", None)
+ self.logical_operation = kwargs.get("logical_operation", None)
+
+
+class QueryKnowledgeBaseOptions(msrest.serialization.Model):
"""The question parameters to answer using a knowledge base.
- :keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
+ :ivar qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
question.
- :paramtype qna_id: int
- :keyword question: User question to query against the knowledge base.
- :paramtype question: str
- :keyword top: Max number of answers to be returned for the question.
- :paramtype top: int
- :keyword user_id: Unique identifier for the user.
- :paramtype user_id: str
- :keyword confidence_score_threshold: Minimum threshold score for answers, value ranges from 0
- to 1.
- :paramtype confidence_score_threshold: float
- :keyword context: Context object with previous QnA's information.
- :paramtype context:
- ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
- :keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker.
- Possible values include: "Default", "QuestionOnly".
- :paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :keyword strict_filters: Filter QnAs based on give metadata list and knowledge base source
- names.
- :paramtype strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
- :keyword answer_span_request: To configure Answer span prediction feature.
- :paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
- :keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured
- Sources.
- :paramtype include_unstructured_sources: bool
+ :vartype qna_id: int
+ :ivar question: User question to query against the knowledge base.
+ :vartype question: str
+ :ivar top: Max number of answers to be returned for the question.
+ :vartype top: int
+ :ivar user_id: Unique identifier for the user.
+ :vartype user_id: str
+ :ivar confidence_score_threshold: Minimum threshold score for answers, value ranges from 0 to
+ 1.
+ :vartype confidence_score_threshold: float
+ :ivar context: Context object with previous QnA's information.
+ :vartype context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
+ :ivar ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
+ values include: "Default", "QuestionOnly".
+ :vartype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
+ :ivar filters: Filter QnAs based on give metadata list and knowledge base source names.
+ :vartype filters: ~azure.ai.language.questionanswering.models.QueryFilters
+ :ivar answer_span_request: To configure Answer span prediction feature.
+ :vartype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
+ :ivar include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources.
+ :vartype include_unstructured_sources: bool
"""
_validation = {
@@ -361,13 +525,40 @@ class KnowledgeBaseQueryOptions(msrest.serialization.Model):
"confidence_score_threshold": {"key": "confidenceScoreThreshold", "type": "float"},
"context": {"key": "context", "type": "KnowledgeBaseAnswerRequestContext"},
"ranker_type": {"key": "rankerType", "type": "str"},
- "strict_filters": {"key": "strictFilters", "type": "StrictFilters"},
+ "filters": {"key": "filters", "type": "QueryFilters"},
"answer_span_request": {"key": "answerSpanRequest", "type": "AnswerSpanRequest"},
"include_unstructured_sources": {"key": "includeUnstructuredSources", "type": "bool"},
}
def __init__(self, **kwargs):
- super(KnowledgeBaseQueryOptions, self).__init__(**kwargs)
+ """
+ :keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
+ question.
+ :paramtype qna_id: int
+ :keyword question: User question to query against the knowledge base.
+ :paramtype question: str
+ :keyword top: Max number of answers to be returned for the question.
+ :paramtype top: int
+ :keyword user_id: Unique identifier for the user.
+ :paramtype user_id: str
+ :keyword confidence_score_threshold: Minimum threshold score for answers, value ranges from 0
+ to 1.
+ :paramtype confidence_score_threshold: float
+ :keyword context: Context object with previous QnA's information.
+ :paramtype context:
+ ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
+ :keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker.
+ Possible values include: "Default", "QuestionOnly".
+ :paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
+ :keyword filters: Filter QnAs based on give metadata list and knowledge base source names.
+ :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
+ :keyword answer_span_request: To configure Answer span prediction feature.
+ :paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
+ :keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured
+ Sources.
+ :paramtype include_unstructured_sources: bool
+ """
+ super(QueryKnowledgeBaseOptions, self).__init__(**kwargs)
self.qna_id = kwargs.get("qna_id", None)
self.question = kwargs.get("question", None)
self.top = kwargs.get("top", None)
@@ -375,75 +566,71 @@ def __init__(self, **kwargs):
self.confidence_score_threshold = kwargs.get("confidence_score_threshold", None)
self.context = kwargs.get("context", None)
self.ranker_type = kwargs.get("ranker_type", None)
- self.strict_filters = kwargs.get("strict_filters", None)
+ self.filters = kwargs.get("filters", None)
self.answer_span_request = kwargs.get("answer_span_request", None)
self.include_unstructured_sources = kwargs.get("include_unstructured_sources", None)
-class MetadataFilter(msrest.serialization.Model):
- """Find QnAs that are associated with the given list of metadata.
+class QueryTextOptions(msrest.serialization.Model):
+ """The question and text record parameters to answer.
- :keyword metadata: Dictionary of :code:``.
- :paramtype metadata: dict[str, str]
- :keyword compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
- Possible values include: "AND", "OR".
- :paramtype compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationKind
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar question: Required. User question to query against the given text records.
+ :vartype question: str
+ :ivar records: Required. Text records to be searched for given question.
+ :vartype records: list[~azure.ai.language.questionanswering.models.TextRecord]
+ :ivar language: Language of the text records. This is BCP-47 representation of a language. For
+ example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
+ default.
+ :vartype language: str
"""
- _attribute_map = {
- "metadata": {"key": "metadata", "type": "{str}"},
- "compound_operation": {"key": "compoundOperation", "type": "str"},
+ _validation = {
+ "question": {"required": True},
+ "records": {"required": True},
}
- def __init__(self, **kwargs):
- super(MetadataFilter, self).__init__(**kwargs)
- self.metadata = kwargs.get("metadata", None)
- self.compound_operation = kwargs.get("compound_operation", None)
-
-
-class StrictFilters(msrest.serialization.Model):
- """filters over knowledge base.
-
- :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.
- :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
- :keyword source_filter: Find QnAs that are associated with the given list of sources in
- knowledge base.
- :paramtype source_filter: list[str]
- :keyword compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
- Possible values include: "AND", "OR".
- :paramtype compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationKind
- """
-
_attribute_map = {
- "metadata_filter": {"key": "metadataFilter", "type": "MetadataFilter"},
- "source_filter": {"key": "sourceFilter", "type": "[str]"},
- "compound_operation": {"key": "compoundOperation", "type": "str"},
+ "question": {"key": "question", "type": "str"},
+ "records": {"key": "records", "type": "[TextRecord]"},
+ "language": {"key": "language", "type": "str"},
+ "string_index_type": {"key": "stringIndexType", "type": "str"},
}
def __init__(self, **kwargs):
- super(StrictFilters, self).__init__(**kwargs)
- self.metadata_filter = kwargs.get("metadata_filter", None)
- self.source_filter = kwargs.get("source_filter", None)
- self.compound_operation = kwargs.get("compound_operation", None)
+ """
+ :keyword question: Required. User question to query against the given text records.
+ :paramtype question: str
+ :keyword records: Required. Text records to be searched for given question.
+ :paramtype records: list[~azure.ai.language.questionanswering.models.TextRecord]
+ :keyword language: Language of the text records. This is BCP-47 representation of a language.
+ For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
+ default.
+ :paramtype language: str
+ """
+ super(QueryTextOptions, self).__init__(**kwargs)
+ self.question = kwargs["question"]
+ self.records = kwargs["records"]
+ self.language = kwargs.get("language", None)
+ self.string_index_type = "UnicodeCodePoint"
class TextAnswer(msrest.serialization.Model):
"""Represents answer result.
- :keyword answer: Answer.
- :paramtype answer: str
- :keyword confidence_score: answer confidence score, value ranges from 0 to 1.
- :paramtype confidence_score: float
- :keyword id: record ID.
- :paramtype id: str
- :keyword answer_span: Answer span object with respect to user's question.
- :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
- :keyword offset: The sentence offset from the start of the document.
- :paramtype offset: int
- :keyword length: The length of the sentence.
- :paramtype length: int
+ :ivar answer: Answer.
+ :vartype answer: str
+ :ivar confidence_score: answer confidence score, value ranges from 0 to 1.
+ :vartype confidence_score: float
+ :ivar id: record ID.
+ :vartype id: str
+ :ivar answer_span: Answer span object with respect to user's question.
+ :vartype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ :ivar offset: The sentence offset from the start of the document.
+ :vartype offset: int
+ :ivar length: The length of the sentence.
+ :vartype length: int
"""
_validation = {
@@ -460,6 +647,20 @@ class TextAnswer(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword answer: Answer.
+ :paramtype answer: str
+ :keyword confidence_score: answer confidence score, value ranges from 0 to 1.
+ :paramtype confidence_score: float
+ :keyword id: record ID.
+ :paramtype id: str
+ :keyword answer_span: Answer span object with respect to user's question.
+ :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ :keyword offset: The sentence offset from the start of the document.
+ :paramtype offset: int
+ :keyword length: The length of the sentence.
+ :paramtype length: int
+ """
super(TextAnswer, self).__init__(**kwargs)
self.answer = kwargs.get("answer", None)
self.confidence_score = kwargs.get("confidence_score", None)
@@ -472,8 +673,8 @@ def __init__(self, **kwargs):
class TextAnswers(msrest.serialization.Model):
"""Represents the answer results.
- :keyword answers: Represents the answer results.
- :paramtype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]
+ :ivar answers: Represents the answer results.
+ :vartype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]
"""
_attribute_map = {
@@ -481,60 +682,23 @@ class TextAnswers(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword answers: Represents the answer results.
+ :paramtype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]
+ """
super(TextAnswers, self).__init__(**kwargs)
self.answers = kwargs.get("answers", None)
-class TextQueryOptions(msrest.serialization.Model):
- """The question and text record parameters to answer.
-
- All required parameters must be populated in order to send to Azure.
-
- :keyword question: Required. User question to query against the given text records.
- :paramtype question: str
- :keyword records: Required. Text records to be searched for given question.
- :paramtype records: list[~azure.ai.language.questionanswering.models.TextRecord]
- :keyword language: Language of the text records. This is BCP-47 representation of a language.
- For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
- default.
- :paramtype language: str
- :keyword string_index_type: Specifies the method used to interpret string offsets. Defaults to
- Text Elements (Graphemes) according to Unicode v8.0.0. For additional information see
- https://aka.ms/text-analytics-offsets. Possible values include: "TextElements_v8",
- "UnicodeCodePoint", "Utf16CodeUnit". Default value: "TextElements_v8".
- :paramtype string_index_type: str or
- ~azure.ai.language.questionanswering.models.StringIndexType
- """
-
- _validation = {
- "question": {"required": True},
- "records": {"required": True},
- }
-
- _attribute_map = {
- "question": {"key": "question", "type": "str"},
- "records": {"key": "records", "type": "[TextRecord]"},
- "language": {"key": "language", "type": "str"},
- "string_index_type": {"key": "stringIndexType", "type": "str"},
- }
-
- def __init__(self, **kwargs):
- super(TextQueryOptions, self).__init__(**kwargs)
- self.question = kwargs["question"]
- self.records = kwargs["records"]
- self.language = kwargs.get("language", None)
- self.string_index_type = kwargs.get("string_index_type", "TextElements_v8")
-
-
class TextRecord(msrest.serialization.Model):
"""Represent input text record to be queried.
All required parameters must be populated in order to send to Azure.
- :keyword id: Required. Unique identifier for the text record.
- :paramtype id: str
- :keyword text: Required. Text contents of the record.
- :paramtype text: str
+ :ivar id: Required. Unique identifier for the text record.
+ :vartype id: str
+ :ivar text: Required. Text contents of the record.
+ :vartype text: str
"""
_validation = {
@@ -548,6 +712,12 @@ class TextRecord(msrest.serialization.Model):
}
def __init__(self, **kwargs):
+ """
+ :keyword id: Required. Unique identifier for the text record.
+ :paramtype id: str
+ :keyword text: Required. Text contents of the record.
+ :paramtype text: str
+ """
super(TextRecord, self).__init__(**kwargs)
self.id = kwargs["id"]
self.text = kwargs["text"]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py
index e7b7507de720..83829e675a0d 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py
@@ -6,7 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional, Tuple, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
@@ -17,14 +17,14 @@
class AnswerSpan(msrest.serialization.Model):
"""Answer span object of QnA.
- :keyword text: Predicted text of answer span.
- :paramtype text: str
- :keyword confidence_score: Predicted score of answer span, value ranges from 0 to 1.
- :paramtype confidence_score: float
- :keyword offset: The answer span offset from the start of answer.
- :paramtype offset: int
- :keyword length: The length of the answer span.
- :paramtype length: int
+ :ivar text: Predicted text of answer span.
+ :vartype text: str
+ :ivar confidence_score: Predicted score of answer span, value ranges from 0 to 1.
+ :vartype confidence_score: float
+ :ivar offset: The answer span offset from the start of answer.
+ :vartype offset: int
+ :ivar length: The length of the answer span.
+ :vartype length: int
"""
_validation = {
@@ -47,6 +47,16 @@ def __init__(
length: Optional[int] = None,
**kwargs
):
+ """
+ :keyword text: Predicted text of answer span.
+ :paramtype text: str
+ :keyword confidence_score: Predicted score of answer span, value ranges from 0 to 1.
+ :paramtype confidence_score: float
+ :keyword offset: The answer span offset from the start of answer.
+ :paramtype offset: int
+ :keyword length: The length of the answer span.
+ :paramtype length: int
+ """
super(AnswerSpan, self).__init__(**kwargs)
self.text = text
self.confidence_score = confidence_score
@@ -57,14 +67,14 @@ def __init__(
class AnswerSpanRequest(msrest.serialization.Model):
"""To configure Answer span prediction feature.
- :keyword enable: Enable or disable Answer Span prediction.
- :paramtype enable: bool
- :keyword confidence_score_threshold: Minimum threshold score required to include an answer
- span, value ranges from 0 to 1.
- :paramtype confidence_score_threshold: float
- :keyword top_answers_with_span: Number of Top answers to be considered for span prediction from
- 1 to 10.
- :paramtype top_answers_with_span: int
+ :ivar enable: Enable or disable Answer Span prediction.
+ :vartype enable: bool
+ :ivar confidence_score_threshold: Minimum threshold score required to include an answer span,
+ value ranges from 0 to 1.
+ :vartype confidence_score_threshold: float
+ :ivar top_answers_with_span: Number of Top answers to be considered for span prediction from 1
+ to 10.
+ :vartype top_answers_with_span: int
"""
_validation = {
@@ -86,6 +96,16 @@ def __init__(
top_answers_with_span: Optional[int] = None,
**kwargs
):
+ """
+ :keyword enable: Enable or disable Answer Span prediction.
+ :paramtype enable: bool
+ :keyword confidence_score_threshold: Minimum threshold score required to include an answer
+ span, value ranges from 0 to 1.
+ :paramtype confidence_score_threshold: float
+ :keyword top_answers_with_span: Number of Top answers to be considered for span prediction from
+ 1 to 10.
+ :paramtype top_answers_with_span: int
+ """
super(AnswerSpanRequest, self).__init__(**kwargs)
self.enable = enable
self.confidence_score_threshold = confidence_score_threshold
@@ -97,19 +117,19 @@ class Error(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ :ivar code: Required. One of a server-defined set of error codes. Possible values include:
"InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound",
"TooManyRequests", "InternalServerError", "ServiceUnavailable".
- :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode
- :keyword message: Required. A human-readable representation of the error.
- :paramtype message: str
- :keyword target: The target of the error.
- :paramtype target: str
- :keyword details: An array of details about specific errors that led to this reported error.
- :paramtype details: list[~azure.ai.language.questionanswering.models.Error]
- :keyword innererror: An object containing more specific information than the current object
- about the error.
- :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ :vartype code: str or ~azure.ai.language.questionanswering.models.ErrorCode
+ :ivar message: Required. A human-readable representation of the error.
+ :vartype message: str
+ :ivar target: The target of the error.
+ :vartype target: str
+ :ivar details: An array of details about specific errors that led to this reported error.
+ :vartype details: list[~azure.ai.language.questionanswering.models.Error]
+ :ivar innererror: An object containing more specific information than the current object about
+ the error.
+ :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
"""
_validation = {
@@ -135,6 +155,21 @@ def __init__(
innererror: Optional["InnerErrorModel"] = None,
**kwargs
):
+ """
+ :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound",
+ "TooManyRequests", "InternalServerError", "ServiceUnavailable".
+ :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode
+ :keyword message: Required. A human-readable representation of the error.
+ :paramtype message: str
+ :keyword target: The target of the error.
+ :paramtype target: str
+ :keyword details: An array of details about specific errors that led to this reported error.
+ :paramtype details: list[~azure.ai.language.questionanswering.models.Error]
+ :keyword innererror: An object containing more specific information than the current object
+ about the error.
+ :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ """
super(Error, self).__init__(**kwargs)
self.code = code
self.message = message
@@ -146,8 +181,8 @@ def __init__(
class ErrorResponse(msrest.serialization.Model):
"""Error response.
- :keyword error: The error object.
- :paramtype error: ~azure.ai.language.questionanswering.models.Error
+ :ivar error: The error object.
+ :vartype error: ~azure.ai.language.questionanswering.models.Error
"""
_attribute_map = {
@@ -155,6 +190,10 @@ class ErrorResponse(msrest.serialization.Model):
}
def __init__(self, *, error: Optional["Error"] = None, **kwargs):
+ """
+ :keyword error: The error object.
+ :paramtype error: ~azure.ai.language.questionanswering.models.Error
+ """
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
@@ -164,19 +203,19 @@ class InnerErrorModel(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ :ivar code: Required. One of a server-defined set of error codes. Possible values include:
"InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound",
"AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure".
- :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode
- :keyword message: Required. Error message.
- :paramtype message: str
- :keyword details: Error details.
- :paramtype details: dict[str, str]
- :keyword target: Error target.
- :paramtype target: str
- :keyword innererror: An object containing more specific information than the current object
- about the error.
- :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ :vartype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode
+ :ivar message: Required. Error message.
+ :vartype message: str
+ :ivar details: Error details.
+ :vartype details: dict[str, str]
+ :ivar target: Error target.
+ :vartype target: str
+ :ivar innererror: An object containing more specific information than the current object about
+ the error.
+ :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
"""
_validation = {
@@ -202,6 +241,21 @@ def __init__(
innererror: Optional["InnerErrorModel"] = None,
**kwargs
):
+ """
+ :keyword code: Required. One of a server-defined set of error codes. Possible values include:
+ "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound",
+ "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure".
+ :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode
+ :keyword message: Required. Error message.
+ :paramtype message: str
+ :keyword details: Error details.
+ :paramtype details: dict[str, str]
+ :keyword target: Error target.
+ :paramtype target: str
+ :keyword innererror: An object containing more specific information than the current object
+ about the error.
+ :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
+ """
super(InnerErrorModel, self).__init__(**kwargs)
self.code = code
self.message = message
@@ -213,23 +267,23 @@ def __init__(
class KnowledgeBaseAnswer(msrest.serialization.Model):
"""Represents knowledge base answer.
- :keyword questions: List of questions.
- :paramtype questions: list[str]
- :keyword answer: The Answer.
- :paramtype answer: str
- :keyword confidence_score: Answer confidence score, value ranges from 0 to 1.
- :paramtype confidence_score: float
- :keyword id: ID of the QnA result.
- :paramtype id: int
- :keyword source: Source of QnA result.
- :paramtype source: str
- :keyword metadata: Metadata associated with the answer, useful to categorize or filter question
+ :ivar questions: List of questions.
+ :vartype questions: list[str]
+ :ivar answer: The Answer.
+ :vartype answer: str
+ :ivar confidence_score: Answer confidence score, value ranges from 0 to 1.
+ :vartype confidence_score: float
+ :ivar id: ID of the QnA result.
+ :vartype id: int
+ :ivar source: Source of QnA result.
+ :vartype source: str
+ :ivar metadata: Metadata associated with the answer, useful to categorize or filter question
answers.
- :paramtype metadata: dict[str, str]
- :keyword dialog: Dialog associated with Answer.
- :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
- :keyword answer_span: Answer span object of QnA with respect to user's question.
- :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ :vartype metadata: dict[str, str]
+ :ivar dialog: Dialog associated with Answer.
+ :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
+ :ivar answer_span: Answer span object of QnA with respect to user's question.
+ :vartype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
"""
_validation = {
@@ -260,6 +314,25 @@ def __init__(
answer_span: Optional["AnswerSpan"] = None,
**kwargs
):
+ """
+ :keyword questions: List of questions.
+ :paramtype questions: list[str]
+ :keyword answer: The Answer.
+ :paramtype answer: str
+ :keyword confidence_score: Answer confidence score, value ranges from 0 to 1.
+ :paramtype confidence_score: float
+ :keyword id: ID of the QnA result.
+ :paramtype id: int
+ :keyword source: Source of QnA result.
+ :paramtype source: str
+ :keyword metadata: Metadata associated with the answer, useful to categorize or filter question
+ answers.
+ :paramtype metadata: dict[str, str]
+ :keyword dialog: Dialog associated with Answer.
+ :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
+ :keyword answer_span: Answer span object of QnA with respect to user's question.
+ :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ """
super(KnowledgeBaseAnswer, self).__init__(**kwargs)
self.questions = questions
self.answer = answer
@@ -274,12 +347,12 @@ def __init__(
class KnowledgeBaseAnswerDialog(msrest.serialization.Model):
"""Dialog associated with Answer.
- :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.
- If true, do not include this QnA as search result for queries without context; otherwise, if
+ :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If
+ true, do not include this QnA as search result for queries without context; otherwise, if
false, ignores context and includes this QnA in search result.
- :paramtype is_context_only: bool
- :keyword prompts: List of 0 to 20 prompts associated with the answer.
- :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
+ :vartype is_context_only: bool
+ :ivar prompts: List of 0 to 20 prompts associated with the answer.
+ :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
"""
_validation = {
@@ -298,6 +371,14 @@ def __init__(
prompts: Optional[List["KnowledgeBaseAnswerPrompt"]] = None,
**kwargs
):
+ """
+ :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.
+ If true, do not include this QnA as search result for queries without context; otherwise, if
+ false, ignores context and includes this QnA in search result.
+ :paramtype is_context_only: bool
+ :keyword prompts: List of 0 to 20 prompts associated with the answer.
+ :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
+ """
super(KnowledgeBaseAnswerDialog, self).__init__(**kwargs)
self.is_context_only = is_context_only
self.prompts = prompts
@@ -306,12 +387,12 @@ def __init__(
class KnowledgeBaseAnswerPrompt(msrest.serialization.Model):
"""Prompt for an answer.
- :keyword display_order: Index of the prompt - used in ordering of the prompts.
- :paramtype display_order: int
- :keyword qna_id: QnA ID corresponding to the prompt.
- :paramtype qna_id: int
- :keyword display_text: Text displayed to represent a follow up question prompt.
- :paramtype display_text: str
+ :ivar display_order: Index of the prompt - used in ordering of the prompts.
+ :vartype display_order: int
+ :ivar qna_id: QnA ID corresponding to the prompt.
+ :vartype qna_id: int
+ :ivar display_text: Text displayed to represent a follow up question prompt.
+ :vartype display_text: str
"""
_validation = {
@@ -332,6 +413,14 @@ def __init__(
display_text: Optional[str] = None,
**kwargs
):
+ """
+ :keyword display_order: Index of the prompt - used in ordering of the prompts.
+ :paramtype display_order: int
+ :keyword qna_id: QnA ID corresponding to the prompt.
+ :paramtype qna_id: int
+ :keyword display_text: Text displayed to represent a follow up question prompt.
+ :paramtype display_text: str
+ """
super(KnowledgeBaseAnswerPrompt, self).__init__(**kwargs)
self.display_order = display_order
self.qna_id = qna_id
@@ -343,10 +432,10 @@ class KnowledgeBaseAnswerRequestContext(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword previous_qna_id: Required. Previous turn top answer result QnA ID.
- :paramtype previous_qna_id: int
- :keyword previous_user_query: Previous user query.
- :paramtype previous_user_query: str
+ :ivar previous_qna_id: Required. Previous turn top answer result QnA ID.
+ :vartype previous_qna_id: int
+ :ivar previous_user_query: Previous user query.
+ :vartype previous_user_query: str
"""
_validation = {
@@ -359,6 +448,12 @@ class KnowledgeBaseAnswerRequestContext(msrest.serialization.Model):
}
def __init__(self, *, previous_qna_id: int, previous_user_query: Optional[str] = None, **kwargs):
+ """
+ :keyword previous_qna_id: Required. Previous turn top answer result QnA ID.
+ :paramtype previous_qna_id: int
+ :keyword previous_user_query: Previous user query.
+ :paramtype previous_user_query: str
+ """
super(KnowledgeBaseAnswerRequestContext, self).__init__(**kwargs)
self.previous_qna_id = previous_qna_id
self.previous_user_query = previous_user_query
@@ -367,8 +462,8 @@ def __init__(self, *, previous_qna_id: int, previous_user_query: Optional[str] =
class KnowledgeBaseAnswers(msrest.serialization.Model):
"""Represents List of Question Answers.
- :keyword answers: Represents Answer Result list.
- :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
+ :ivar answers: Represents Answer Result list.
+ :vartype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
"""
_attribute_map = {
@@ -376,39 +471,121 @@ class KnowledgeBaseAnswers(msrest.serialization.Model):
}
def __init__(self, *, answers: Optional[List["KnowledgeBaseAnswer"]] = None, **kwargs):
+ """
+ :keyword answers: Represents Answer Result list.
+ :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
+ """
super(KnowledgeBaseAnswers, self).__init__(**kwargs)
self.answers = answers
-class KnowledgeBaseQueryOptions(msrest.serialization.Model):
+class MetadataFilter(msrest.serialization.Model):
+ """Find QnAs that are associated with the given list of metadata.
+
+ :ivar metadata: List of metadata tuples (presumably key/value pairs — confirm against service contract).
+ :vartype metadata: list[tuple[str]]
+ :ivar logical_operation: Operation used to join metadata filters. Possible values include:
+ "AND", "OR".
+ :vartype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+
+ _attribute_map = {
+ "metadata": {"key": "metadata", "type": "[[str]]"},
+ "logical_operation": {"key": "logicalOperation", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metadata: Optional[List[Tuple[str]]] = None,
+ logical_operation: Optional[Union[str, "LogicalOperationKind"]] = None,
+ **kwargs
+ ):
+ """
+ :keyword metadata: List of metadata tuples (presumably key/value pairs — confirm against service contract).
+ :paramtype metadata: list[tuple[str]]
+ :keyword logical_operation: Operation used to join metadata filters. Possible values include:
+ "AND", "OR".
+ :paramtype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+ super(MetadataFilter, self).__init__(**kwargs)
+ self.metadata = metadata
+ self.logical_operation = logical_operation
+
+
+class QueryFilters(msrest.serialization.Model):
+ """filters over knowledge base.
+
+ :ivar metadata_filter: Find QnAs that are associated with the given list of metadata.
+ :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
+ :ivar source_filter: Find QnAs that are associated with the given list of sources in knowledge
+ base.
+ :vartype source_filter: list[str]
+ :ivar logical_operation: Logical operation used to join metadata filters with source filters.
+ Possible values include: "AND", "OR".
+ :vartype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+
+ _attribute_map = {
+ "metadata_filter": {"key": "metadataFilter", "type": "MetadataFilter"},
+ "source_filter": {"key": "sourceFilter", "type": "[str]"},
+ "logical_operation": {"key": "logicalOperation", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metadata_filter: Optional["MetadataFilter"] = None,
+ source_filter: Optional[List[str]] = None,
+ logical_operation: Optional[Union[str, "LogicalOperationKind"]] = None,
+ **kwargs
+ ):
+ """
+ :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.
+ :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
+ :keyword source_filter: Find QnAs that are associated with the given list of sources in
+ knowledge base.
+ :paramtype source_filter: list[str]
+ :keyword logical_operation: Logical operation used to join metadata filters with source
+ filters. Possible values include: "AND", "OR".
+ :paramtype logical_operation: str or
+ ~azure.ai.language.questionanswering.models.LogicalOperationKind
+ """
+ super(QueryFilters, self).__init__(**kwargs)
+ self.metadata_filter = metadata_filter
+ self.source_filter = source_filter
+ self.logical_operation = logical_operation
+
+
+class QueryKnowledgeBaseOptions(msrest.serialization.Model):
"""The question parameters to answer using a knowledge base.
- :keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
+ :ivar qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
question.
- :paramtype qna_id: int
- :keyword question: User question to query against the knowledge base.
- :paramtype question: str
- :keyword top: Max number of answers to be returned for the question.
- :paramtype top: int
- :keyword user_id: Unique identifier for the user.
- :paramtype user_id: str
- :keyword confidence_score_threshold: Minimum threshold score for answers, value ranges from 0
- to 1.
- :paramtype confidence_score_threshold: float
- :keyword context: Context object with previous QnA's information.
- :paramtype context:
- ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
- :keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker.
- Possible values include: "Default", "QuestionOnly".
- :paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :keyword strict_filters: Filter QnAs based on give metadata list and knowledge base source
- names.
- :paramtype strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
- :keyword answer_span_request: To configure Answer span prediction feature.
- :paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
- :keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured
- Sources.
- :paramtype include_unstructured_sources: bool
+ :vartype qna_id: int
+ :ivar question: User question to query against the knowledge base.
+ :vartype question: str
+ :ivar top: Max number of answers to be returned for the question.
+ :vartype top: int
+ :ivar user_id: Unique identifier for the user.
+ :vartype user_id: str
+ :ivar confidence_score_threshold: Minimum threshold score for answers, value ranges from 0 to
+ 1.
+ :vartype confidence_score_threshold: float
+ :ivar context: Context object with previous QnA's information.
+ :vartype context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
+ :ivar ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
+ values include: "Default", "QuestionOnly".
+ :vartype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
+ :ivar filters: Filter QnAs based on given metadata list and knowledge base source names.
+ :vartype filters: ~azure.ai.language.questionanswering.models.QueryFilters
+ :ivar answer_span_request: To configure Answer span prediction feature.
+ :vartype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
+ :ivar include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources.
+ :vartype include_unstructured_sources: bool
"""
_validation = {
@@ -423,7 +600,7 @@ class KnowledgeBaseQueryOptions(msrest.serialization.Model):
"confidence_score_threshold": {"key": "confidenceScoreThreshold", "type": "float"},
"context": {"key": "context", "type": "KnowledgeBaseAnswerRequestContext"},
"ranker_type": {"key": "rankerType", "type": "str"},
- "strict_filters": {"key": "strictFilters", "type": "StrictFilters"},
+ "filters": {"key": "filters", "type": "QueryFilters"},
"answer_span_request": {"key": "answerSpanRequest", "type": "AnswerSpanRequest"},
"include_unstructured_sources": {"key": "includeUnstructuredSources", "type": "bool"},
}
@@ -438,12 +615,39 @@ def __init__(
confidence_score_threshold: Optional[float] = None,
context: Optional["KnowledgeBaseAnswerRequestContext"] = None,
ranker_type: Optional[Union[str, "RankerType"]] = None,
- strict_filters: Optional["StrictFilters"] = None,
+ filters: Optional["QueryFilters"] = None,
answer_span_request: Optional["AnswerSpanRequest"] = None,
include_unstructured_sources: Optional[bool] = None,
**kwargs
):
- super(KnowledgeBaseQueryOptions, self).__init__(**kwargs)
+ """
+ :keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
+ question.
+ :paramtype qna_id: int
+ :keyword question: User question to query against the knowledge base.
+ :paramtype question: str
+ :keyword top: Max number of answers to be returned for the question.
+ :paramtype top: int
+ :keyword user_id: Unique identifier for the user.
+ :paramtype user_id: str
+ :keyword confidence_score_threshold: Minimum threshold score for answers, value ranges from 0
+ to 1.
+ :paramtype confidence_score_threshold: float
+ :keyword context: Context object with previous QnA's information.
+ :paramtype context:
+ ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
+ :keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker.
+ Possible values include: "Default", "QuestionOnly".
+ :paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
+ :keyword filters: Filter QnAs based on given metadata list and knowledge base source names.
+ :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
+ :keyword answer_span_request: To configure Answer span prediction feature.
+ :paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
+ :keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured
+ Sources.
+ :paramtype include_unstructured_sources: bool
+ """
+ super(QueryKnowledgeBaseOptions, self).__init__(**kwargs)
self.qna_id = qna_id
self.question = question
self.top = top
@@ -451,88 +655,71 @@ def __init__(
self.confidence_score_threshold = confidence_score_threshold
self.context = context
self.ranker_type = ranker_type
- self.strict_filters = strict_filters
+ self.filters = filters
self.answer_span_request = answer_span_request
self.include_unstructured_sources = include_unstructured_sources
-class MetadataFilter(msrest.serialization.Model):
- """Find QnAs that are associated with the given list of metadata.
+class QueryTextOptions(msrest.serialization.Model):
+ """The question and text record parameters to answer.
- :keyword metadata: Dictionary of :code:``.
- :paramtype metadata: dict[str, str]
- :keyword compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
- Possible values include: "AND", "OR".
- :paramtype compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationKind
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar question: Required. User question to query against the given text records.
+ :vartype question: str
+ :ivar records: Required. Text records to be searched for given question.
+ :vartype records: list[~azure.ai.language.questionanswering.models.TextRecord]
+ :ivar language: Language of the text records. This is BCP-47 representation of a language. For
+ example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
+ default.
+ :vartype language: str
"""
- _attribute_map = {
- "metadata": {"key": "metadata", "type": "{str}"},
- "compound_operation": {"key": "compoundOperation", "type": "str"},
+ _validation = {
+ "question": {"required": True},
+ "records": {"required": True},
}
- def __init__(
- self,
- *,
- metadata: Optional[Dict[str, str]] = None,
- compound_operation: Optional[Union[str, "CompoundOperationKind"]] = None,
- **kwargs
- ):
- super(MetadataFilter, self).__init__(**kwargs)
- self.metadata = metadata
- self.compound_operation = compound_operation
-
-
-class StrictFilters(msrest.serialization.Model):
- """filters over knowledge base.
-
- :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.
- :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
- :keyword source_filter: Find QnAs that are associated with the given list of sources in
- knowledge base.
- :paramtype source_filter: list[str]
- :keyword compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
- Possible values include: "AND", "OR".
- :paramtype compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationKind
- """
-
_attribute_map = {
- "metadata_filter": {"key": "metadataFilter", "type": "MetadataFilter"},
- "source_filter": {"key": "sourceFilter", "type": "[str]"},
- "compound_operation": {"key": "compoundOperation", "type": "str"},
+ "question": {"key": "question", "type": "str"},
+ "records": {"key": "records", "type": "[TextRecord]"},
+ "language": {"key": "language", "type": "str"},
+ "string_index_type": {"key": "stringIndexType", "type": "str"},
}
- def __init__(
- self,
- *,
- metadata_filter: Optional["MetadataFilter"] = None,
- source_filter: Optional[List[str]] = None,
- compound_operation: Optional[Union[str, "CompoundOperationKind"]] = None,
- **kwargs
- ):
- super(StrictFilters, self).__init__(**kwargs)
- self.metadata_filter = metadata_filter
- self.source_filter = source_filter
- self.compound_operation = compound_operation
+ def __init__(self, *, question: str, records: List["TextRecord"], language: Optional[str] = None, **kwargs):
+ """
+ :keyword question: Required. User question to query against the given text records.
+ :paramtype question: str
+ :keyword records: Required. Text records to be searched for given question.
+ :paramtype records: list[~azure.ai.language.questionanswering.models.TextRecord]
+ :keyword language: Language of the text records. This is BCP-47 representation of a language.
+ For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
+ default.
+ :paramtype language: str
+ """
+ super(QueryTextOptions, self).__init__(**kwargs)
+ self.question = question
+ self.records = records
+ self.language = language
+ self.string_index_type = "UnicodeCodePoint"
class TextAnswer(msrest.serialization.Model):
"""Represents answer result.
- :keyword answer: Answer.
- :paramtype answer: str
- :keyword confidence_score: answer confidence score, value ranges from 0 to 1.
- :paramtype confidence_score: float
- :keyword id: record ID.
- :paramtype id: str
- :keyword answer_span: Answer span object with respect to user's question.
- :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
- :keyword offset: The sentence offset from the start of the document.
- :paramtype offset: int
- :keyword length: The length of the sentence.
- :paramtype length: int
+ :ivar answer: Answer.
+ :vartype answer: str
+ :ivar confidence_score: answer confidence score, value ranges from 0 to 1.
+ :vartype confidence_score: float
+ :ivar id: record ID.
+ :vartype id: str
+ :ivar answer_span: Answer span object with respect to user's question.
+ :vartype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ :ivar offset: The sentence offset from the start of the document.
+ :vartype offset: int
+ :ivar length: The length of the sentence.
+ :vartype length: int
"""
_validation = {
@@ -559,6 +746,20 @@ def __init__(
length: Optional[int] = None,
**kwargs
):
+ """
+ :keyword answer: Answer.
+ :paramtype answer: str
+ :keyword confidence_score: answer confidence score, value ranges from 0 to 1.
+ :paramtype confidence_score: float
+ :keyword id: record ID.
+ :paramtype id: str
+ :keyword answer_span: Answer span object with respect to user's question.
+ :paramtype answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
+ :keyword offset: The sentence offset from the start of the document.
+ :paramtype offset: int
+ :keyword length: The length of the sentence.
+ :paramtype length: int
+ """
super(TextAnswer, self).__init__(**kwargs)
self.answer = answer
self.confidence_score = confidence_score
@@ -571,8 +772,8 @@ def __init__(
class TextAnswers(msrest.serialization.Model):
"""Represents the answer results.
- :keyword answers: Represents the answer results.
- :paramtype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]
+ :ivar answers: Represents the answer results.
+ :vartype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]
"""
_attribute_map = {
@@ -580,68 +781,23 @@ class TextAnswers(msrest.serialization.Model):
}
def __init__(self, *, answers: Optional[List["TextAnswer"]] = None, **kwargs):
+ """
+ :keyword answers: Represents the answer results.
+ :paramtype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]
+ """
super(TextAnswers, self).__init__(**kwargs)
self.answers = answers
-class TextQueryOptions(msrest.serialization.Model):
- """The question and text record parameters to answer.
-
- All required parameters must be populated in order to send to Azure.
-
- :keyword question: Required. User question to query against the given text records.
- :paramtype question: str
- :keyword records: Required. Text records to be searched for given question.
- :paramtype records: list[~azure.ai.language.questionanswering.models.TextRecord]
- :keyword language: Language of the text records. This is BCP-47 representation of a language.
- For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
- default.
- :paramtype language: str
- :keyword string_index_type: Specifies the method used to interpret string offsets. Defaults to
- Text Elements (Graphemes) according to Unicode v8.0.0. For additional information see
- https://aka.ms/text-analytics-offsets. Possible values include: "TextElements_v8",
- "UnicodeCodePoint", "Utf16CodeUnit". Default value: "TextElements_v8".
- :paramtype string_index_type: str or
- ~azure.ai.language.questionanswering.models.StringIndexType
- """
-
- _validation = {
- "question": {"required": True},
- "records": {"required": True},
- }
-
- _attribute_map = {
- "question": {"key": "question", "type": "str"},
- "records": {"key": "records", "type": "[TextRecord]"},
- "language": {"key": "language", "type": "str"},
- "string_index_type": {"key": "stringIndexType", "type": "str"},
- }
-
- def __init__(
- self,
- *,
- question: str,
- records: List["TextRecord"],
- language: Optional[str] = None,
- string_index_type: Optional[Union[str, "StringIndexType"]] = "TextElements_v8",
- **kwargs
- ):
- super(TextQueryOptions, self).__init__(**kwargs)
- self.question = question
- self.records = records
- self.language = language
- self.string_index_type = string_index_type
-
-
class TextRecord(msrest.serialization.Model):
"""Represent input text record to be queried.
All required parameters must be populated in order to send to Azure.
- :keyword id: Required. Unique identifier for the text record.
- :paramtype id: str
- :keyword text: Required. Text contents of the record.
- :paramtype text: str
+ :ivar id: Required. Unique identifier for the text record.
+ :vartype id: str
+ :ivar text: Required. Text contents of the record.
+ :vartype text: str
"""
_validation = {
@@ -655,6 +811,12 @@ class TextRecord(msrest.serialization.Model):
}
def __init__(self, *, id: str, text: str, **kwargs):
+ """
+ :keyword id: Required. Unique identifier for the text record.
+ :paramtype id: str
+ :keyword text: Required. Text contents of the record.
+ :paramtype text: str
+ """
super(TextRecord, self).__init__(**kwargs)
self.id = id
self.text = text
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py
index 869e123dc112..d6c78563d2b6 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py
@@ -11,13 +11,6 @@
from azure.core import CaseInsensitiveEnumMeta
-class CompoundOperationKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
- """(Optional) Set to 'OR' for joining metadata using 'OR' operation."""
-
- AND_ENUM = "AND"
- OR_ENUM = "OR"
-
-
class ErrorCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Human-readable error code."""
@@ -42,26 +35,15 @@ class InnerErrorCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
EXTRACTION_FAILURE = "ExtractionFailure"
+class LogicalOperationKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
+ """Set to 'OR' or 'AND' for using corresponding logical operation."""
+
+ AND_ENUM = "AND"
+ OR_ENUM = "OR"
+
+
class RankerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""(Optional) Set to 'QuestionOnly' for using a question only Ranker."""
DEFAULT = "Default"
QUESTION_ONLY = "QuestionOnly"
-
-
-class StringIndexType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
- """Specifies the method used to interpret string offsets. Defaults to Text Elements (Graphemes)
- according to Unicode v8.0.0. For additional information see
- https://aka.ms/text-analytics-offsets.
- """
-
- #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme
- #: clusters) confirming to the Unicode 8.0.0 standard. Use this option if your application is
- #: written in .Net Framework or .Net Core and you will be using StringInfo.
- TEXT_ELEMENTS_V8 = "TextElements_v8"
- #: Returned offset and length values will correspond to Unicode code points. Use this option if
- #: your application is written in a language that support Unicode, for example Python.
- UNICODE_CODE_POINT = "UnicodeCodePoint"
- #: Returned offset and length values will correspond to UTF-16 code units. Use this option if your
- #: application is written in a language that support Unicode, for example Java, JavaScript.
- UTF16_CODE_UNIT = "Utf16CodeUnit"
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_operations.py
index 4f8f585e2a7e..a18c2b15911b 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_operations.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_operations.py
@@ -22,7 +22,7 @@
from msrest import Serializer
from .. import models as _models
-from .._patch import _validate_text_records
+from .._patch import _validate_text_records, _get_positional_body, _verify_qna_id_and_question
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -40,7 +40,7 @@ def build_query_knowledge_base_request(
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
project_name = kwargs.pop('project_name') # type: str
- deployment_name = kwargs.pop('deployment_name', None) # type: Optional[str]
+ deployment_name = kwargs.pop('deployment_name') # type: str
api_version = "2021-07-15-preview"
accept = "application/json"
@@ -50,8 +50,7 @@ def build_query_knowledge_base_request(
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['projectName'] = _SERIALIZER.query("project_name", project_name, 'str')
- if deployment_name is not None:
- query_parameters['deploymentName'] = _SERIALIZER.query("deployment_name", deployment_name, 'str')
+ query_parameters['deploymentName'] = _SERIALIZER.query("deployment_name", deployment_name, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
@@ -103,21 +102,20 @@ class QuestionAnsweringClientOperationsMixin(object):
@overload
def query_knowledge_base(
self,
- knowledge_base_query_options, # type: "_models.KnowledgeBaseQueryOptions"
+ options, # type: "_models.QueryKnowledgeBaseOptions"
**kwargs # type: Any
):
# type: (...) -> "_models.KnowledgeBaseAnswers"
"""Answers the specified question using your knowledge base.
- :param knowledge_base_query_options: Post body of the request.
- :type knowledge_base_query_options:
- ~azure.ai.language.questionanswering.models.KnowledgeBaseQueryOptions
+ :param options: Positional-only POST body of the request.
+ :type options:
+ ~azure.ai.language.questionanswering.models.QueryKnowledgeBaseOptions
:keyword project_name: The name of the project to use.
:paramtype project_name: str
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgeBaseAnswers, or the result of cls(response)
+ :return: KnowledgeBaseAnswers
:rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -152,14 +150,13 @@ def query_knowledge_base(
:keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
values include: "Default", "QuestionOnly".
:paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :keyword strict_filters: Filter QnAs based on give metadata list and knowledge base source names.
- :paramtype strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
+ :keyword filters: Filter QnAs based on given metadata list and knowledge base source names.
+ :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
:keyword answer_span_request: To configure Answer span prediction feature.
:paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
:keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources.
:paramtype include_unstructured_sources: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgeBaseAnswers, or the result of cls(response)
+ :return: KnowledgeBaseAnswers
:rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -167,21 +164,21 @@ def query_knowledge_base(
def query_knowledge_base(
self,
- *args, # type: "_models.KnowledgeBaseQueryOptions"
+ *args, # type: "_models.QueryKnowledgeBaseOptions"
**kwargs # type: Any
):
# type: (...) -> "_models.KnowledgeBaseAnswers"
"""Answers the specified question using your knowledge base.
- :param knowledge_base_query_options: Post body of the request. Provide either `knowledge_base_query_options`, OR
+ :param options: POST body of the request. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
- :type knowledge_base_query_options:
- ~azure.ai.language.questionanswering.models.KnowledgeBaseQueryOptions
+ :type options:
+ ~azure.ai.language.questionanswering.models.QueryKnowledgeBaseOptions
:keyword project_name: The name of the project to use.
:paramtype project_name: str
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
- :keyword question: User question to query against the knowledge base. Provide either `knowledge_base_query_options`, OR
+ :keyword question: User question to query against the knowledge base. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:paramtype question: str
:keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over question.
@@ -197,40 +194,37 @@ def query_knowledge_base(
:keyword ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
values include: "Default", "QuestionOnly".
:paramtype ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :keyword strict_filters: Filter QnAs based on give metadata list and knowledge base source names.
- :paramtype strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
+ :keyword filters: Filter QnAs based on given metadata list and knowledge base source names.
+ :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
:keyword answer_span_request: To configure Answer span prediction feature.
:paramtype answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
:keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources.
:paramtype include_unstructured_sources: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgeBaseAnswers, or the result of cls(response)
+ :return: KnowledgeBaseAnswers
:rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
- if args:
- knowledge_base_query_options = args[0]
- else:
- knowledge_base_query_options = _models.KnowledgeBaseQueryOptions(
- qna_id=kwargs.pop("qna_id", None),
- question=kwargs.pop("question", None),
- top=kwargs.pop("top", None),
- user_id=kwargs.pop("user_id", None),
- confidence_score_threshold=kwargs.pop("confidence_score_threshold", None),
- context=kwargs.pop("context", None),
- ranker_type=kwargs.pop("ranker_type", None),
- strict_filters=kwargs.pop("strict_filters", None),
- answer_span_request=kwargs.pop("answer_span_request", None),
- include_unstructured_sources=kwargs.pop("include_unstructured_sources", None)
- )
+ options = _get_positional_body(*args, **kwargs) or _models.QueryKnowledgeBaseOptions(
+ qna_id=kwargs.pop("qna_id", None),
+ question=kwargs.pop("question", None),
+ top=kwargs.pop("top", None),
+ user_id=kwargs.pop("user_id", None),
+ confidence_score_threshold=kwargs.pop("confidence_score_threshold", None),
+ context=kwargs.pop("context", None),
+ ranker_type=kwargs.pop("ranker_type", None),
+ filters=kwargs.pop("filters", None),
+ answer_span_request=kwargs.pop("answer_span_request", None),
+ include_unstructured_sources=kwargs.pop("include_unstructured_sources", None)
+ )
+ _verify_qna_id_and_question(options)
cls = kwargs.pop("cls", None) # type: ClsType["_models.KnowledgeBaseAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
project_name = kwargs.pop("project_name") # type: str
- deployment_name = kwargs.pop("deployment_name", None) # type: Optional[str]
+ deployment_name = kwargs.pop("deployment_name") # type: str
- json = self._serialize.body(knowledge_base_query_options, "KnowledgeBaseQueryOptions")
+ json = self._serialize.body(options, "QueryKnowledgeBaseOptions")
request = build_query_knowledge_base_request(
content_type=content_type,
@@ -244,7 +238,7 @@ def query_knowledge_base(
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
@@ -264,16 +258,15 @@ def query_knowledge_base(
@overload
def query_text(
self,
- text_query_options, # type: "_models.TextQueryOptions"
+ options, # type: "_models.QueryTextOptions"
**kwargs # type: Any
):
# type: (...) -> "_models.TextAnswers"
"""Answers the specified question using the provided text in the body.
- :param text_query_options: Post body of the request.
- :type text_query_options: ~azure.ai.language.questionanswering.models.TextQueryOptions
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: TextAnswers, or the result of cls(response)
+ :param options: Positional-only POST body of the request.
+ :type options: ~azure.ai.language.questionanswering.models.QueryTextOptions
+ :return: TextAnswers
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -295,13 +288,7 @@ def query_text(
example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
default.
:paramtype language: str
- :keyword string_index_type: Specifies the method used to interpret string offsets. Defaults to
- Text Elements (Graphemes) according to Unicode v8.0.0. For additional information see
- https://aka.ms/text-analytics-offsets. Possible values include: "TextElements_v8",
- "UnicodeCodePoint", "Utf16CodeUnit". Default value: "TextElements_v8".
- :paramtype string_index_type: str or ~azure.ai.language.questionanswering.models.StringIndexType
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: TextAnswers, or the result of cls(response)
+ :return: TextAnswers
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
@@ -309,54 +296,44 @@ def query_text(
def query_text(
self,
- *args, # type: "_models.TextQueryOptions"
+ *args, # type: "_models.QueryTextOptions"
**kwargs # type: Any
):
# type: (...) -> "_models.TextAnswers"
"""Answers the specified question using the provided text in the body.
- :param text_query_options: Post body of the request. Provide either `text_query_options`, OR
+ :param options: POST body of the request. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
- :type text_query_options: ~azure.ai.language.questionanswering.models.TextQueryOptions
- :keyword question: User question to query against the given text records. Provide either `text_query_options`,
+ :type options: ~azure.ai.language.questionanswering.models.QueryTextOptions
+ :keyword question: User question to query against the given text records. Provide either `options`,
OR individual keyword arguments. If both are provided, only the options object will be used.
:paramtype question: str
- :keyword records: Text records to be searched for given question. Provide either `text_query_options`, OR
+ :keyword records: Text records to be searched for given question. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:paramtype records: list[str or ~azure.ai.language.questionanswering.models.TextRecord]
:keyword language: Language of the text records. This is BCP-47 representation of a language. For
example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as default.
:paramtype language: str
- :keyword string_index_type: Specifies the method used to interpret string offsets. Defaults to
- Text Elements (Graphemes) according to Unicode v8.0.0. For additional information see
- https://aka.ms/text-analytics-offsets. Possible values include: "TextElements_v8",
- "UnicodeCodePoint", "Utf16CodeUnit". Default value: "TextElements_v8".
- :paramtype string_index_type: str or ~azure.ai.language.questionanswering.models.StringIndexType
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: TextAnswers, or the result of cls(response)
+ :return: TextAnswers
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
- if args:
- text_query_options = args[0]
- else:
- text_query_options = _models.TextQueryOptions(
- question=kwargs.pop("question"),
- records=kwargs.pop("records"),
- language=kwargs.pop("language", None),
- string_index_type=kwargs.pop("string_index_type", "TextElements_v8")
- )
+ options = _get_positional_body(*args, **kwargs) or _models.QueryTextOptions(
+ question=kwargs.pop("question"),
+ records=kwargs.pop("records"),
+ language=kwargs.pop("language", self._default_language),
+ )
try:
- text_query_options['records'] = _validate_text_records(text_query_options['records'])
+ options['records'] = _validate_text_records(options['records'])
except TypeError:
- text_query_options.records = _validate_text_records(text_query_options.records)
+ options.records = _validate_text_records(options.records)
cls = kwargs.pop("cls", None) # type: ClsType["_models.TextAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
- json = self._serialize.body(text_query_options, "TextQueryOptions")
+ json = self._serialize.body(options, "QueryTextOptions")
request = build_query_text_request(
content_type=content_type,
@@ -368,7 +345,7 @@ def query_text(
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py
index 483e68f85705..fa80fc5e0c3c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py
@@ -35,7 +35,7 @@ async def sample_chit_chat():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
- first_question = qna.KnowledgeBaseQueryOptions(
+ first_question = qna.QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
@@ -56,7 +56,7 @@ async def sample_chit_chat():
print("Q: {}".format(first_question.question))
print("A: {}".format(best_candidate.answer))
- followup_question = qna.KnowledgeBaseQueryOptions(
+ followup_question = qna.QueryKnowledgeBaseOptions(
question="How long it takes to charge Surface?",
top=3,
confidence_score_threshold=0.2,
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py
index 2f5b0ef1f2e5..93a4b946f7ef 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py
@@ -35,7 +35,7 @@ async def sample_query_knowledgebase():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
- input = qna.KnowledgeBaseQueryOptions(
+ input = qna.QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py
index a34195f7e320..8b7df0051fd0 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py
@@ -32,7 +32,7 @@ async def sample_query_text():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
- input = qna.TextQueryOptions(
+ input = qna.QueryTextOptions(
question="How long it takes to charge surface?",
records=[
qna.TextRecord(
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py
index 68077ff16f05..033c59be5529 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py
@@ -33,7 +33,7 @@ def sample_chit_chat():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
with client:
- first_question = qna.KnowledgeBaseQueryOptions(
+ first_question = qna.QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
@@ -51,10 +51,10 @@ def sample_chit_chat():
deployment_name="test"
)
best_candidate = [a for a in output.answers if a.confidence_score > 0.9][0]
- print("Q: {}".format(first_question.question))
- print("A: {}".format(best_candidate.answer))
+ print(u"Q: {}".format(first_question.question))
+ print(u"A: {}".format(best_candidate.answer))
- followup_question = qna.KnowledgeBaseQueryOptions(
+ followup_question = qna.QueryKnowledgeBaseOptions(
question="How long it takes to charge Surface?",
top=3,
confidence_score_threshold=0.2,
@@ -75,8 +75,8 @@ def sample_chit_chat():
project_name=knowledge_base_project,
deployment_name="test"
)
- print("Q: {}".format(followup_question.question))
- print("A: {}".format(output.answers[0].answer))
+ print(u"Q: {}".format(followup_question.question))
+ print(u"A: {}".format(output.answers[0].answer))
# [END chit_chat]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py
index 21599649b602..6bfb59022c0b 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py
@@ -33,7 +33,7 @@ def sample_query_knowledgebase():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
with client:
- input = qna.KnowledgeBaseQueryOptions(
+ input = qna.QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py
index 9f784b5a5e4c..47582b2d106a 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py
@@ -31,12 +31,12 @@ def sample_query_text():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
with client:
- input = qna.TextQueryOptions(
+ input = qna.QueryTextOptions(
question="How long it takes to charge surface?",
records=[
qna.TextRecord(
text="Power and charging. It takes two to four hours to charge the Surface Pro 4 battery fully from an empty state. " +
- "It can take longer if you’re using your Surface for power-intensive activities like gaming or video streaming while you’re charging it.",
+ "It can take longer if you're using your Surface for power-intensive activities like gaming or video streaming while you're charging it.",
id="doc1"
),
qna.TextRecord(
@@ -50,8 +50,8 @@ def sample_query_text():
output = client.query_text(input)
best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
- print("Q: {}".format(input.question))
- print("A: {}".format(best_answer.answer))
+ print(u"Q: {}".format(input.question))
+ print(u"A: {}".format(best_answer.answer))
# [END query_text]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/setup.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/setup.py
index 976409977724..43de47ef0e69 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/setup.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/setup.py
@@ -53,20 +53,21 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
- # This means any folder structure that only consists of a __init__.py.
- # For example, for storage, this would mean adding 'azure.storage'
+ # This means any folder structure that only consists of a __init__.py.
+ # For example, for storage, this would mean adding 'azure.storage'
# in addition to the default 'azure' that is seen here.
'azure',
'azure.ai',
'azure.ai.language',
]),
install_requires=[
- 'azure-core<2.0.0,>=1.16.0',
+ 'azure-core<2.0.0,>=1.19.0',
'msrest>=0.6.21',
],
extras_require={
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/swagger/README.md b/sdk/cognitivelanguage/azure-ai-language-questionanswering/swagger/README.md
index 631ff0b2fbb5..2ba0ad9472a9 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/swagger/README.md
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/swagger/README.md
@@ -20,7 +20,7 @@ autorest
### Settings
```yaml
-input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/feature/cognitiveservices/language/specification/cognitiveservices/data-plane/Language/preview/2021-07-15-preview/questionanswering.json
+input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/68e7988aba481206f08019d3efb585683d5bc577/specification/cognitiveservices/data-plane/Language/preview/2021-07-15-preview/questionanswering.json
output-folder: ../azure/ai/language/questionanswering
namespace: azure.ai.language.questionanswering
package-name: azure-ai-language-questionanswering
@@ -58,3 +58,65 @@ directive:
$["operationId"] = "queryText";
```
+### Rename `KnowledgeBasedQueryOptions` -> `Options`
+
+```yaml
+directive:
+ - from: swagger-document
+ where: $["parameters"]["KnowledgeBaseQueryOptions"]
+ transform: >
+ $["x-ms-client-name"] = "Options";
+```
+
+### Rename `TextQueryOptions` -> `Options`
+
+```yaml
+directive:
+ - from: swagger-document
+ where: $["parameters"]["TextQueryOptions"]
+ transform: >
+ $["x-ms-client-name"] = "Options";
+```
+
+### Rename `KnowledgeBaseQueryOptions` -> `QueryKnowledgeBaseOptions`
+
+```yaml
+directive:
+ - from: swagger-document
+ where: $["definitions"]["KnowledgeBaseQueryOptions"]
+ transform: >
+ $["x-ms-client-name"] = "QueryKnowledgeBaseOptions";
+```
+
+### Rename `TextQueryOptions` -> `QueryTextOptions`
+
+```yaml
+directive:
+ - from: swagger-document
+ where: $["definitions"]["TextQueryOptions"]
+ transform: >
+ $["x-ms-client-name"] = "QueryTextOptions";
+```
+
+### Delete `StringIndexType`
+
+```yaml
+directive:
+ - from: swagger-document
+ where: $["definitions"]["TextQueryOptions"]
+ transform: >
+ delete $.properties["stringIndexType"]
+```
+
+### Make `MetadataFilter`'s `metadata` property a list of string
+
+```yaml
+directive:
+ - from: swagger-document
+ where: $["definitions"]
+ transform: >
+ delete $["MetadataFilter"]["properties"]["metadata"]["items"]["$ref"];
+ $["MetadataFilter"]["properties"]["metadata"]["items"]["type"] = "array";
+ $["MetadataFilter"]["properties"]["metadata"]["items"]["items"] = {"type": "string"};
+ delete $["MetadataRecord"];
+```
\ No newline at end of file
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase.yaml
index afdbe07a2dba..1a5ee269c0f9 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase.yaml
@@ -82,10 +82,10 @@ interactions:
devices directly to your Surface Pro 4 using the USB port, Mini DisplayPort,
or Bluetooth. Or, connect everything to a Surface Dock (sold separately).
With Surface Dock, you can switch between fully connected and fully mobile
- with a single connector.\",\n \"confidenceScore\": 0.46380000000000005,\n
- \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
- \ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
- accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
+ with a single connector.\",\n \"confidenceScore\": 0.4067,\n \"id\":
+ 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"connect monitors, accessories, and
+ other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
0,\n \"qnaId\": 65,\n \"displayText\": \"Set up your
workspace with Surface Dock\"\n },\n {\n \"displayOrder\":
@@ -99,27 +99,27 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
apim-request-id:
- - af2f063d-0c1a-4be9-8022-4748b0b968f4
+ - d0dc208c-60f1-4669-bddb-4522e3949ecf
content-length:
- - '7136'
+ - '7123'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:29 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '1908'
+ - '1273'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_filter.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_filter.yaml
new file mode 100644
index 000000000000..29691ffe60eb
--- /dev/null
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_filter.yaml
@@ -0,0 +1,66 @@
+interactions:
+- request:
+ body: '{"question": "Battery life", "top": 3, "filters": {"metadataFilter": {"metadata":
+ [["explicitlytaggedheading", "check the battery level"], ["explicitlytaggedheading",
+ "make your battery last"]]}, "logicalOperation": "OR"}}'
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '222'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-ai-language-questionanswering/1.0.0b2 Python/3.9.5 (macOS-11.5.2-x86_64-i386-64bit)
+ method: POST
+ uri: https://test-resource.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=test-project&deploymentName=test&api-version=2021-07-15-preview
+ response:
+ body:
+ string: "{\n \"answers\": [\n {\n \"questions\": [\n \"Check
+ the battery level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
+ can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
+ 0.6905,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ \ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
+ level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
+ \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
+ \ \"Make your battery last\"\n ],\n \"answer\": \"**Make your
+ battery last**\\n\\nFor info on how to care for your battery and power supply,
+ conserve power, and make your Surface battery last longer, see [Surface battery
+ and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
+ on Surface.com.\",\n \"confidenceScore\": 0.5818,\n \"id\": 27,\n
+ \ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
+ \ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
+ []\n }\n },\n {\n \"questions\": [\n \"Desktop taskbar.\"\n
+ \ ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery status appears
+ at the right side of the taskbar. Select the battery icon for info about the
+ charging and battery status, including the percent remaining. \u272A\",\n
+ \ \"confidenceScore\": 0.36340000000000006,\n \"id\": 26,\n \"source\":
+ \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
+ {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
+ \ ]\n}"
+ headers:
+ apim-request-id:
+ - 7884df53-4df8-488a-b78c-692ac7f4f783
+ content-length:
+ - '1622'
+ content-type:
+ - application/json; charset=utf-8
+ csp-billing-usage:
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date:
+ - Thu, 30 Sep 2021 15:52:33 GMT
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ x-content-type-options:
+ - nosniff
+ x-envoy-upstream-service-time:
+ - '700'
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc.yaml
index ae90e71dfc59..45d018962798 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc.yaml
@@ -82,10 +82,10 @@ interactions:
devices directly to your Surface Pro 4 using the USB port, Mini DisplayPort,
or Bluetooth. Or, connect everything to a Surface Dock (sold separately).
With Surface Dock, you can switch between fully connected and fully mobile
- with a single connector.\",\n \"confidenceScore\": 0.46380000000000005,\n
- \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
- \ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
- accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
+ with a single connector.\",\n \"confidenceScore\": 0.4067,\n \"id\":
+ 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"connect monitors, accessories, and
+ other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
0,\n \"qnaId\": 65,\n \"displayText\": \"Set up your
workspace with Surface Dock\"\n },\n {\n \"displayOrder\":
@@ -99,27 +99,27 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 09599054-e8a3-4df2-a441-897e59381452
+ - 020d4cd9-b1fe-49ce-8ce1-e1f95aefbbf3
content-length:
- - '7136'
+ - '7123'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:30 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '493'
+ - '1590'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc_with_answerspan.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc_with_answerspan.yaml
index 08f52c0c598e..525c23f2acd1 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc_with_answerspan.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_llc_with_answerspan.yaml
@@ -77,18 +77,18 @@ interactions:
\ \"qnaId\": 8,\n \"displayText\": \"Memory and storage\"\n
\ },\n {\n \"displayOrder\": 3,\n \"qnaId\":
9,\n \"displayText\": \"Sensors\"\n }\n ]\n },\n
- \ \"answerSpan\": {\n \"text\": \"**Ports and connectors**\\n\\nSurface
- Pro 4 has the ports you expect in a full-feature laptop.\\n\\nFull-size USB
- 3.0 port Connect a USB accessory like a mouse, printer, Ethernet adapter,
- USB drive, or smartphone.\",\n \"confidenceScore\": 0.0106,\n \"offset\":
- 0,\n \"length\": 206\n }\n },\n {\n \"questions\":
- [\n \"Connect monitors, accessories, and other devices\"\n ],\n
- \ \"answer\": \"**Connect monitors, accessories, and other devices**\\n\\nYou
- can connect monitors, accessories, and other devices directly to your Surface
- Pro 4 using the USB port, Mini DisplayPort, or Bluetooth. Or, connect everything
- to a Surface Dock (sold separately). With Surface Dock, you can switch between
- fully connected and fully mobile with a single connector.\",\n \"confidenceScore\":
- 0.46380000000000005,\n \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ \ \"answerSpan\": {\n \"text\": \"\\nSurface Pro 4 has the ports
+ you expect in a full-feature laptop.\\n\\nFull-size USB 3.0 port Connect a
+ USB accessory like a mouse, printer, Ethernet adapter, USB drive, or smartphone\",\n
+ \ \"confidenceScore\": 0.5141,\n \"offset\": 25,\n \"length\":
+ 180\n }\n },\n {\n \"questions\": [\n \"Connect monitors,
+ accessories, and other devices\"\n ],\n \"answer\": \"**Connect
+ monitors, accessories, and other devices**\\n\\nYou can connect monitors,
+ accessories, and other devices directly to your Surface Pro 4 using the USB
+ port, Mini DisplayPort, or Bluetooth. Or, connect everything to a Surface
+ Dock (sold separately). With Surface Dock, you can switch between fully connected
+ and fully mobile with a single connector.\",\n \"confidenceScore\": 0.4067,\n
+ \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
@@ -104,27 +104,27 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 7d3e4c75-318d-47ac-a24f-0e6839903f5d
+ - 8ab8a1da-8fd6-4091-80e3-470885a6b2b7
content-length:
- - '7474'
+ - '7435'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:32 GMT
+ - Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '902'
+ - '2624'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_only_id.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_only_id.yaml
index b82688010904..e6932c6c456c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_only_id.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_only_id.yaml
@@ -29,21 +29,21 @@ interactions:
[]\n }\n }\n ]\n}"
headers:
apim-request-id:
- - e03a4af6-be5a-4515-a848-981ca32279a2
+ - 4f01bfd0-6d10-4fbd-a02c-81160bc374d2
content-length:
- '583'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:33 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '337'
+ - '1071'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_overload.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_overload.yaml
index e848833ab31d..e3d3aacec340 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_overload.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_overload.yaml
@@ -25,41 +25,34 @@ interactions:
your battery last\"\n ],\n \"answer\": \"**Make your battery last**\\n\\nFor
info on how to care for your battery and power supply, conserve power, and
make your Surface battery last longer, see [Surface battery and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
- on Surface.com.\",\n \"confidenceScore\": 0.9290999999999999,\n \"id\":
+ on Surface.com.\",\n \"confidenceScore\": 0.9201999999999999,\n \"id\":
27,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n },\n {\n \"questions\": [\n \"Check the battery
level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
- 0.3069,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.4412,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
- \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
- \ \"Desktop taskbar.\"\n ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery
- status appears at the right side of the taskbar. Select the battery icon for
- info about the charging and battery status, including the percent remaining.
- \u272A\",\n \"confidenceScore\": 0.2548,\n \"id\": 26,\n \"source\":
- \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
- {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
- \ ]\n}"
+ \ \"prompts\": []\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 8740053b-e4fa-4394-ba6c-28d0b064703c
+ - f02239ca-4edc-4348-b5d6-b3023765bddb
content-length:
- - '1621'
+ - '1140'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:35 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '395'
+ - '916'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_python_dict.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_python_dict.yaml
index 3235745609d1..041d14502a21 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_python_dict.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_python_dict.yaml
@@ -29,21 +29,21 @@ interactions:
[]\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 713fabed-16c6-4c14-858e-099bcfa4cbfc
+ - 845c4b78-915e-484c-af56-56ae05539a6e
content-length:
- '583'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:36 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '153'
+ - '1099'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_answerspan.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_answerspan.yaml
index 7815ea378e3d..f90bbb484e56 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_answerspan.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_answerspan.yaml
@@ -77,18 +77,18 @@ interactions:
\ \"qnaId\": 8,\n \"displayText\": \"Memory and storage\"\n
\ },\n {\n \"displayOrder\": 3,\n \"qnaId\":
9,\n \"displayText\": \"Sensors\"\n }\n ]\n },\n
- \ \"answerSpan\": {\n \"text\": \"**Ports and connectors**\\n\\nSurface
- Pro 4 has the ports you expect in a full-feature laptop.\\n\\nFull-size USB
- 3.0 port Connect a USB accessory like a mouse, printer, Ethernet adapter,
- USB drive, or smartphone.\",\n \"confidenceScore\": 0.0106,\n \"offset\":
- 0,\n \"length\": 206\n }\n },\n {\n \"questions\":
- [\n \"Connect monitors, accessories, and other devices\"\n ],\n
- \ \"answer\": \"**Connect monitors, accessories, and other devices**\\n\\nYou
- can connect monitors, accessories, and other devices directly to your Surface
- Pro 4 using the USB port, Mini DisplayPort, or Bluetooth. Or, connect everything
- to a Surface Dock (sold separately). With Surface Dock, you can switch between
- fully connected and fully mobile with a single connector.\",\n \"confidenceScore\":
- 0.46380000000000005,\n \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ \ \"answerSpan\": {\n \"text\": \"\\nSurface Pro 4 has the ports
+ you expect in a full-feature laptop.\\n\\nFull-size USB 3.0 port Connect a
+ USB accessory like a mouse, printer, Ethernet adapter, USB drive, or smartphone\",\n
+ \ \"confidenceScore\": 0.5141,\n \"offset\": 25,\n \"length\":
+ 180\n }\n },\n {\n \"questions\": [\n \"Connect monitors,
+ accessories, and other devices\"\n ],\n \"answer\": \"**Connect
+ monitors, accessories, and other devices**\\n\\nYou can connect monitors,
+ accessories, and other devices directly to your Surface Pro 4 using the USB
+ port, Mini DisplayPort, or Bluetooth. Or, connect everything to a Surface
+ Dock (sold separately). With Surface Dock, you can switch between fully connected
+ and fully mobile with a single connector.\",\n \"confidenceScore\": 0.4067,\n
+ \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
@@ -96,6 +96,10 @@ interactions:
workspace with Surface Dock\"\n },\n {\n \"displayOrder\":
1,\n \"qnaId\": 66,\n \"displayText\": \"Connect or
project to a monitor, screen, or other display\"\n }\n ]\n
+ \ },\n \"answerSpan\": {\n \"text\": \"\\nYou can connect
+ monitors, accessories, and other devices directly to your Surface Pro 4 using
+ the USB port, Mini DisplayPort, or Bluetooth\",\n \"confidenceScore\":
+ 0.051399999999999994,\n \"offset\": 53,\n \"length\": 140\n
\ }\n },\n {\n \"questions\": [\n \"Projector or monitor.\"\n
\ ],\n \"answer\": \"If your monitor has a DisplayPort, you can connect
it to your Surface using a DisplayPort to Mini DisplayPort cable (sold separately).
@@ -104,27 +108,27 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 0eb4c00c-dd83-45be-8d96-aba3077392c2
+ - 33feb3cb-5959-4ca7-97de-8c95847f517e
content-length:
- - '7474'
+ - '7719'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:37 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '342'
+ - '1270'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_dictparams.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_dictparams.yaml
index 9992a6c804fe..459558c35245 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_dictparams.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_dictparams.yaml
@@ -25,41 +25,34 @@ interactions:
your battery last\"\n ],\n \"answer\": \"**Make your battery last**\\n\\nFor
info on how to care for your battery and power supply, conserve power, and
make your Surface battery last longer, see [Surface battery and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
- on Surface.com.\",\n \"confidenceScore\": 0.9290999999999999,\n \"id\":
+ on Surface.com.\",\n \"confidenceScore\": 0.9201999999999999,\n \"id\":
27,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n },\n {\n \"questions\": [\n \"Check the battery
level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
- 0.3069,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.4412,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
- \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
- \ \"Desktop taskbar.\"\n ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery
- status appears at the right side of the taskbar. Select the battery icon for
- info about the charging and battery status, including the percent remaining.
- \u272A\",\n \"confidenceScore\": 0.2548,\n \"id\": 26,\n \"source\":
- \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
- {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
- \ ]\n}"
+ \ \"prompts\": []\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 5b72cd45-d5b9-492c-ad05-7172c2ae070d
+ - 1f863859-4839-430b-9e06-3461d04f010e
content-length:
- - '1621'
+ - '1140'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:38 GMT
+ - Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '365'
+ - '2626'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_followup.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_followup.yaml
index 72af1ce6d157..96d3a557c24b 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_followup.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_with_followup.yaml
@@ -25,41 +25,34 @@ interactions:
your battery last\"\n ],\n \"answer\": \"**Make your battery last**\\n\\nFor
info on how to care for your battery and power supply, conserve power, and
make your Surface battery last longer, see [Surface battery and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
- on Surface.com.\",\n \"confidenceScore\": 0.9290999999999999,\n \"id\":
+ on Surface.com.\",\n \"confidenceScore\": 0.9201999999999999,\n \"id\":
27,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n },\n {\n \"questions\": [\n \"Check the battery
level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
- 0.3069,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.4412,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
- \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
- \ \"Desktop taskbar.\"\n ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery
- status appears at the right side of the taskbar. Select the battery icon for
- info about the charging and battery status, including the percent remaining.
- \u272A\",\n \"confidenceScore\": 0.2548,\n \"id\": 26,\n \"source\":
- \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
- {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
- \ ]\n}"
+ \ \"prompts\": []\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 555d76da-0f60-4c4c-8a93-2f3e263ca9c0
+ - 227983f1-c75e-4ee2-910d-589011a7a251
content-length:
- - '1621'
+ - '1140'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:39 GMT
+ - Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '322'
+ - '2660'
status:
code: 200
message: OK
@@ -95,7 +88,7 @@ interactions:
like a phone, while your Surface charges. The USB port on the power supply
is only for charging, not for data transfer. If you want to use a USB device,
plug it into the USB port on your Surface.\",\n \"confidenceScore\":
- 0.6517000000000001,\n \"id\": 23,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.5296,\n \"id\": 23,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"power and charging\"\n
\ },\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[\n {\n \"displayOrder\": 0,\n \"qnaId\": 24,\n
@@ -105,34 +98,34 @@ interactions:
2,\n \"qnaId\": 26,\n \"displayText\": \"Desktop taskbar.\"\n
\ },\n {\n \"displayOrder\": 3,\n \"qnaId\":
27,\n \"displayText\": \"Make your battery last\"\n }\n
- \ ]\n },\n \"answerSpan\": {\n \"text\": \"two to four
- hours\",\n \"confidenceScore\": 0.3086,\n \"offset\": 33,\n
- \ \"length\": 18\n }\n },\n {\n \"questions\": [\n \"Charge
+ \ ]\n },\n \"answerSpan\": {\n \"text\": \" two to
+ four hours\",\n \"confidenceScore\": 0.3795,\n \"offset\": 32,\n
+ \ \"length\": 19\n }\n },\n {\n \"questions\": [\n \"Charge
your Surface Pro 4\"\n ],\n \"answer\": \"**Charge your Surface
Pro 4**\\n\\n1. Connect the two parts of the power cord.\\n\\n2. Connect
the power cord securely to the charging port.\\n\\n3. Plug the power supply
- into an electrical outlet.\",\n \"confidenceScore\": 0.30260000000000004,\n
- \ \"id\": 19,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
- \ \"metadata\": {\n \"explicitlytaggedheading\": \"charge your
- surface pro 4\"\n },\n \"dialog\": {\n \"isContextOnly\":
- false,\n \"prompts\": []\n }\n }\n ]\n}"
+ into an electrical outlet.\",\n \"confidenceScore\": 0.2707,\n \"id\":
+ 19,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"charge your surface pro 4\"\n },\n
+ \ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
+ []\n }\n }\n ]\n}"
headers:
apim-request-id:
- - 9ef0bd69-7736-4ed5-8c21-879df23aaa47
+ - f95352a5-4e6d-4ec1-8809-d164a774dfe6
content-length:
- - '2178'
+ - '2154'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
date:
- - Tue, 21 Sep 2021 21:06:41 GMT
+ - Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '1046'
+ - '509'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase.yaml
index bf7d70f91563..e522576f82a0 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase.yaml
@@ -78,10 +78,10 @@ interactions:
devices directly to your Surface Pro 4 using the USB port, Mini DisplayPort,
or Bluetooth. Or, connect everything to a Surface Dock (sold separately).
With Surface Dock, you can switch between fully connected and fully mobile
- with a single connector.\",\n \"confidenceScore\": 0.46380000000000005,\n
- \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
- \ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
- accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
+ with a single connector.\",\n \"confidenceScore\": 0.4067,\n \"id\":
+ 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"connect monitors, accessories, and
+ other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
0,\n \"qnaId\": 65,\n \"displayText\": \"Set up your
workspace with Surface Dock\"\n },\n {\n \"displayOrder\":
@@ -95,19 +95,19 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
- apim-request-id: ef43aa6b-709e-4d07-a1ca-c4cf33586fc6
- content-length: '7136'
+ apim-request-id: 5971ea57-2aa9-4179-bda9-aba6b937380f
+ content-length: '7123'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:43 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:36 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '284'
+ x-envoy-upstream-service-time: '1153'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_filter.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_filter.yaml
new file mode 100644
index 000000000000..abb85ac4c446
--- /dev/null
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_filter.yaml
@@ -0,0 +1,55 @@
+interactions:
+- request:
+ body: '{"question": "Battery life", "top": 3, "filters": {"metadataFilter": {"metadata":
+ [["explicitlytaggedheading", "check the battery level"], ["explicitlytaggedheading",
+ "make your battery last"]]}, "logicalOperation": "OR"}}'
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '222'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-ai-language-questionanswering/1.0.0b2 Python/3.9.5 (macOS-11.5.2-x86_64-i386-64bit)
+ method: POST
+ uri: https://test-resource.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=test-project&deploymentName=test&api-version=2021-07-15-preview
+ response:
+ body:
+ string: "{\n \"answers\": [\n {\n \"questions\": [\n \"Check
+ the battery level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
+ can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
+ 0.6905,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ \ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
+ level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
+ \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
+ \ \"Make your battery last\"\n ],\n \"answer\": \"**Make your
+ battery last**\\n\\nFor info on how to care for your battery and power supply,
+ conserve power, and make your Surface battery last longer, see [Surface battery
+ and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
+ on Surface.com.\",\n \"confidenceScore\": 0.5818,\n \"id\": 27,\n
+ \ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
+ \ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
+ []\n }\n },\n {\n \"questions\": [\n \"Desktop taskbar.\"\n
+ \ ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery status appears
+ at the right side of the taskbar. Select the battery icon for info about the
+ charging and battery status, including the percent remaining. \u272A\",\n
+ \ \"confidenceScore\": 0.36340000000000006,\n \"id\": 26,\n \"source\":
+ \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
+ {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
+ \ ]\n}"
+ headers:
+ apim-request-id: e958c124-6d15-41c7-93a5-0a242f6b01b1
+ content-length: '1622'
+ content-type: application/json; charset=utf-8
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:35 GMT
+ strict-transport-security: max-age=31536000; includeSubDomains; preload
+ x-content-type-options: nosniff
+ x-envoy-upstream-service-time: '1503'
+ status:
+ code: 200
+ message: OK
+ url: https://wuppe.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=190a9e13-8ede-4e4b-a8fd-c4d7f2aeab6c&deploymentName=test&api-version=2021-07-15-preview
+version: 1
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc.yaml
index 0de1a13a3c4b..f5d1f2db923a 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc.yaml
@@ -78,10 +78,10 @@ interactions:
devices directly to your Surface Pro 4 using the USB port, Mini DisplayPort,
or Bluetooth. Or, connect everything to a Surface Dock (sold separately).
With Surface Dock, you can switch between fully connected and fully mobile
- with a single connector.\",\n \"confidenceScore\": 0.46380000000000005,\n
- \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
- \ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
- accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
+ with a single connector.\",\n \"confidenceScore\": 0.4067,\n \"id\":
+ 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"connect monitors, accessories, and
+ other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
0,\n \"qnaId\": 65,\n \"displayText\": \"Set up your
workspace with Surface Dock\"\n },\n {\n \"displayOrder\":
@@ -95,19 +95,19 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
- apim-request-id: 1833a5eb-5934-4f16-abab-46148ceb44a1
- content-length: '7136'
+ apim-request-id: f2cf1974-868f-42c6-b183-ff7f5df0ebe4
+ content-length: '7123'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:46 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '276'
+ x-envoy-upstream-service-time: '519'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc_with_answerspan.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc_with_answerspan.yaml
index cc075a6d8e6f..862b6d6d3678 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc_with_answerspan.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_llc_with_answerspan.yaml
@@ -73,18 +73,18 @@ interactions:
\ \"qnaId\": 8,\n \"displayText\": \"Memory and storage\"\n
\ },\n {\n \"displayOrder\": 3,\n \"qnaId\":
9,\n \"displayText\": \"Sensors\"\n }\n ]\n },\n
- \ \"answerSpan\": {\n \"text\": \"**Ports and connectors**\\n\\nSurface
- Pro 4 has the ports you expect in a full-feature laptop.\\n\\nFull-size USB
- 3.0 port Connect a USB accessory like a mouse, printer, Ethernet adapter,
- USB drive, or smartphone.\",\n \"confidenceScore\": 0.0106,\n \"offset\":
- 0,\n \"length\": 206\n }\n },\n {\n \"questions\":
- [\n \"Connect monitors, accessories, and other devices\"\n ],\n
- \ \"answer\": \"**Connect monitors, accessories, and other devices**\\n\\nYou
- can connect monitors, accessories, and other devices directly to your Surface
- Pro 4 using the USB port, Mini DisplayPort, or Bluetooth. Or, connect everything
- to a Surface Dock (sold separately). With Surface Dock, you can switch between
- fully connected and fully mobile with a single connector.\",\n \"confidenceScore\":
- 0.46380000000000005,\n \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ \ \"answerSpan\": {\n \"text\": \"\\nSurface Pro 4 has the ports
+ you expect in a full-feature laptop.\\n\\nFull-size USB 3.0 port Connect a
+ USB accessory like a mouse, printer, Ethernet adapter, USB drive, or smartphone\",\n
+ \ \"confidenceScore\": 0.5141,\n \"offset\": 25,\n \"length\":
+ 180\n }\n },\n {\n \"questions\": [\n \"Connect monitors,
+ accessories, and other devices\"\n ],\n \"answer\": \"**Connect
+ monitors, accessories, and other devices**\\n\\nYou can connect monitors,
+ accessories, and other devices directly to your Surface Pro 4 using the USB
+ port, Mini DisplayPort, or Bluetooth. Or, connect everything to a Surface
+ Dock (sold separately). With Surface Dock, you can switch between fully connected
+ and fully mobile with a single connector.\",\n \"confidenceScore\": 0.4067,\n
+ \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
@@ -92,6 +92,10 @@ interactions:
workspace with Surface Dock\"\n },\n {\n \"displayOrder\":
1,\n \"qnaId\": 66,\n \"displayText\": \"Connect or
project to a monitor, screen, or other display\"\n }\n ]\n
+ \ },\n \"answerSpan\": {\n \"text\": \"\\nYou can connect
+ monitors, accessories, and other devices directly to your Surface Pro 4 using
+ the USB port, Mini DisplayPort, or Bluetooth\",\n \"confidenceScore\":
+ 0.051399999999999994,\n \"offset\": 53,\n \"length\": 140\n
\ }\n },\n {\n \"questions\": [\n \"Projector or monitor.\"\n
\ ],\n \"answer\": \"If your monitor has a DisplayPort, you can connect
it to your Surface using a DisplayPort to Mini DisplayPort cable (sold separately).
@@ -100,19 +104,19 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
- apim-request-id: 3dbc4527-4e20-44b3-abbe-d380ea18ef2c
- content-length: '7474'
+ apim-request-id: ec633ba0-764f-4865-8e8b-69ffbc1f74f5
+ content-length: '7719'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:47 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:37 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '300'
+ x-envoy-upstream-service-time: '663'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_only_id.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_only_id.yaml
index 7f4f92ee521c..c46b8b79716e 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_only_id.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_only_id.yaml
@@ -24,14 +24,14 @@ interactions:
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
- apim-request-id: 24be7932-f7a1-4019-8472-32c2551d1d6e
+ apim-request-id: 824a47b2-1f4a-4056-8c26-ddba66a21497
content-length: '583'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:49 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '132'
+ x-envoy-upstream-service-time: '879'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_overload.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_overload.yaml
index a258665e5354..c4cf69673348 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_overload.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_overload.yaml
@@ -21,33 +21,26 @@ interactions:
your battery last\"\n ],\n \"answer\": \"**Make your battery last**\\n\\nFor
info on how to care for your battery and power supply, conserve power, and
make your Surface battery last longer, see [Surface battery and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
- on Surface.com.\",\n \"confidenceScore\": 0.9290999999999999,\n \"id\":
+ on Surface.com.\",\n \"confidenceScore\": 0.9201999999999999,\n \"id\":
27,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n },\n {\n \"questions\": [\n \"Check the battery
level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
- 0.3069,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.4412,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
- \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
- \ \"Desktop taskbar.\"\n ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery
- status appears at the right side of the taskbar. Select the battery icon for
- info about the charging and battery status, including the percent remaining.
- \u272A\",\n \"confidenceScore\": 0.2548,\n \"id\": 26,\n \"source\":
- \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
- {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
- \ ]\n}"
+ \ \"prompts\": []\n }\n }\n ]\n}"
headers:
- apim-request-id: 8aa31a73-6acd-4f7a-85c2-dcecc522ae32
- content-length: '1621'
+ apim-request-id: cd3dad50-3644-4273-afc0-442c50d22419
+ content-length: '1140'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:50 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '328'
+ x-envoy-upstream-service-time: '1047'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_python_dict.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_python_dict.yaml
index 84d90331cb91..2b3cc394ee2c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_python_dict.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_python_dict.yaml
@@ -24,14 +24,14 @@ interactions:
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
- apim-request-id: c6c85372-bf1f-4f8c-98ce-8de52137882a
+ apim-request-id: 9980db27-58c1-485e-995a-933a4e41e08c
content-length: '583'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:51 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '139'
+ x-envoy-upstream-service-time: '135'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_answerspan.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_answerspan.yaml
index df113eda18b8..8e6127f52048 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_answerspan.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_answerspan.yaml
@@ -73,18 +73,18 @@ interactions:
\ \"qnaId\": 8,\n \"displayText\": \"Memory and storage\"\n
\ },\n {\n \"displayOrder\": 3,\n \"qnaId\":
9,\n \"displayText\": \"Sensors\"\n }\n ]\n },\n
- \ \"answerSpan\": {\n \"text\": \"**Ports and connectors**\\n\\nSurface
- Pro 4 has the ports you expect in a full-feature laptop.\\n\\nFull-size USB
- 3.0 port Connect a USB accessory like a mouse, printer, Ethernet adapter,
- USB drive, or smartphone.\",\n \"confidenceScore\": 0.0106,\n \"offset\":
- 0,\n \"length\": 206\n }\n },\n {\n \"questions\":
- [\n \"Connect monitors, accessories, and other devices\"\n ],\n
- \ \"answer\": \"**Connect monitors, accessories, and other devices**\\n\\nYou
- can connect monitors, accessories, and other devices directly to your Surface
- Pro 4 using the USB port, Mini DisplayPort, or Bluetooth. Or, connect everything
- to a Surface Dock (sold separately). With Surface Dock, you can switch between
- fully connected and fully mobile with a single connector.\",\n \"confidenceScore\":
- 0.46380000000000005,\n \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ \ \"answerSpan\": {\n \"text\": \"\\nSurface Pro 4 has the ports
+ you expect in a full-feature laptop.\\n\\nFull-size USB 3.0 port Connect a
+ USB accessory like a mouse, printer, Ethernet adapter, USB drive, or smartphone\",\n
+ \ \"confidenceScore\": 0.5141,\n \"offset\": 25,\n \"length\":
+ 180\n }\n },\n {\n \"questions\": [\n \"Connect monitors,
+ accessories, and other devices\"\n ],\n \"answer\": \"**Connect
+ monitors, accessories, and other devices**\\n\\nYou can connect monitors,
+ accessories, and other devices directly to your Surface Pro 4 using the USB
+ port, Mini DisplayPort, or Bluetooth. Or, connect everything to a Surface
+ Dock (sold separately). With Surface Dock, you can switch between fully connected
+ and fully mobile with a single connector.\",\n \"confidenceScore\": 0.4067,\n
+ \ \"id\": 64,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"connect monitors,
accessories, and other devices\"\n },\n \"dialog\": {\n \"isContextOnly\":
false,\n \"prompts\": [\n {\n \"displayOrder\":
@@ -92,6 +92,10 @@ interactions:
workspace with Surface Dock\"\n },\n {\n \"displayOrder\":
1,\n \"qnaId\": 66,\n \"displayText\": \"Connect or
project to a monitor, screen, or other display\"\n }\n ]\n
+ \ },\n \"answerSpan\": {\n \"text\": \"\\nYou can connect
+ monitors, accessories, and other devices directly to your Surface Pro 4 using
+ the USB port, Mini DisplayPort, or Bluetooth\",\n \"confidenceScore\":
+ 0.051399999999999994,\n \"offset\": 53,\n \"length\": 140\n
\ }\n },\n {\n \"questions\": [\n \"Projector or monitor.\"\n
\ ],\n \"answer\": \"If your monitor has a DisplayPort, you can connect
it to your Surface using a DisplayPort to Mini DisplayPort cable (sold separately).
@@ -100,19 +104,19 @@ interactions:
is for video only. Audio will play from your Surface speakers unless you\u2019ve
connected external speakers. For more info about this, see [Surface sound,](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
[volume, and audio accessories](http://www.microsoft.com/surface/support/hardware-and-drivers/sound-volume-and-speakers)
- on Surface.com.\",\n \"confidenceScore\": 0.4307,\n \"id\": 68,\n
+ on Surface.com.\",\n \"confidenceScore\": 0.3392,\n \"id\": 68,\n
\ \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{},\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n }\n ]\n}"
headers:
- apim-request-id: f107eb5a-8eb0-4d0c-8630-bf8a6e8ed463
- content-length: '7474'
+ apim-request-id: db614ffe-6d30-40e1-96a9-f90e41d9dd0d
+ content-length: '7719'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:51 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '325'
+ x-envoy-upstream-service-time: '633'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_dictparams.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_dictparams.yaml
index 1f36421fa211..513de2a22841 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_dictparams.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_dictparams.yaml
@@ -21,33 +21,26 @@ interactions:
your battery last\"\n ],\n \"answer\": \"**Make your battery last**\\n\\nFor
info on how to care for your battery and power supply, conserve power, and
make your Surface battery last longer, see [Surface battery and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
- on Surface.com.\",\n \"confidenceScore\": 0.9290999999999999,\n \"id\":
+ on Surface.com.\",\n \"confidenceScore\": 0.9201999999999999,\n \"id\":
27,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n },\n {\n \"questions\": [\n \"Check the battery
level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
- 0.3069,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.4412,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
- \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
- \ \"Desktop taskbar.\"\n ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery
- status appears at the right side of the taskbar. Select the battery icon for
- info about the charging and battery status, including the percent remaining.
- \u272A\",\n \"confidenceScore\": 0.2548,\n \"id\": 26,\n \"source\":
- \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
- {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
- \ ]\n}"
+ \ \"prompts\": []\n }\n }\n ]\n}"
headers:
- apim-request-id: acaef643-3f2d-4397-aca0-2c4bcc8bb7a2
- content-length: '1621'
+ apim-request-id: e780b038-2930-45fc-99b3-35255fff74ec
+ content-length: '1140'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:53 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:36 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '250'
+ x-envoy-upstream-service-time: '578'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_followup.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_followup.yaml
index 69a285e55600..fa6114118971 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_followup.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_with_followup.yaml
@@ -21,33 +21,26 @@ interactions:
your battery last\"\n ],\n \"answer\": \"**Make your battery last**\\n\\nFor
info on how to care for your battery and power supply, conserve power, and
make your Surface battery last longer, see [Surface battery and power](http://www.microsoft.com/surface/support/hardware-and-drivers/battery-and-power)
- on Surface.com.\",\n \"confidenceScore\": 0.9290999999999999,\n \"id\":
+ on Surface.com.\",\n \"confidenceScore\": 0.9201999999999999,\n \"id\":
27,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
{\n \"explicitlytaggedheading\": \"make your battery last\"\n },\n
\ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[]\n }\n },\n {\n \"questions\": [\n \"Check the battery
level\"\n ],\n \"answer\": \"**Check the battery level**\\n\\nYou
can check the battery level from the lock screen or the desktop:\",\n \"confidenceScore\":
- 0.3069,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.4412,\n \"id\": 24,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"check the battery
level\"\n },\n \"dialog\": {\n \"isContextOnly\": false,\n
- \ \"prompts\": []\n }\n },\n {\n \"questions\": [\n
- \ \"Desktop taskbar.\"\n ],\n \"answer\": \"**Desktop taskbar.**\\n\\nBattery
- status appears at the right side of the taskbar. Select the battery icon for
- info about the charging and battery status, including the percent remaining.
- \u272A\",\n \"confidenceScore\": 0.2548,\n \"id\": 26,\n \"source\":
- \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\": {},\n \"dialog\":
- {\n \"isContextOnly\": false,\n \"prompts\": []\n }\n }\n
- \ ]\n}"
+ \ \"prompts\": []\n }\n }\n ]\n}"
headers:
- apim-request-id: 5a7bbcf4-75f5-4eeb-a994-b8bcd5d2907f
- content-length: '1621'
+ apim-request-id: 4e07975b-1254-451c-8173-9fc8603cadca
+ content-length: '1140'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:53 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:36 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '251'
+ x-envoy-upstream-service-time: '368'
status:
code: 200
message: OK
@@ -80,7 +73,7 @@ interactions:
like a phone, while your Surface charges. The USB port on the power supply
is only for charging, not for data transfer. If you want to use a USB device,
plug it into the USB port on your Surface.\",\n \"confidenceScore\":
- 0.6517000000000001,\n \"id\": 23,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
+ 0.5296,\n \"id\": 23,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
\ \"metadata\": {\n \"explicitlytaggedheading\": \"power and charging\"\n
\ },\n \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
[\n {\n \"displayOrder\": 0,\n \"qnaId\": 24,\n
@@ -90,26 +83,26 @@ interactions:
2,\n \"qnaId\": 26,\n \"displayText\": \"Desktop taskbar.\"\n
\ },\n {\n \"displayOrder\": 3,\n \"qnaId\":
27,\n \"displayText\": \"Make your battery last\"\n }\n
- \ ]\n },\n \"answerSpan\": {\n \"text\": \"two to four
- hours\",\n \"confidenceScore\": 0.3086,\n \"offset\": 33,\n
- \ \"length\": 18\n }\n },\n {\n \"questions\": [\n \"Charge
+ \ ]\n },\n \"answerSpan\": {\n \"text\": \" two to
+ four hours\",\n \"confidenceScore\": 0.3795,\n \"offset\": 32,\n
+ \ \"length\": 19\n }\n },\n {\n \"questions\": [\n \"Charge
your Surface Pro 4\"\n ],\n \"answer\": \"**Charge your Surface
Pro 4**\\n\\n1. Connect the two parts of the power cord.\\n\\n2. Connect
the power cord securely to the charging port.\\n\\n3. Plug the power supply
- into an electrical outlet.\",\n \"confidenceScore\": 0.30260000000000004,\n
- \ \"id\": 19,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n
- \ \"metadata\": {\n \"explicitlytaggedheading\": \"charge your
- surface pro 4\"\n },\n \"dialog\": {\n \"isContextOnly\":
- false,\n \"prompts\": []\n }\n }\n ]\n}"
+ into an electrical outlet.\",\n \"confidenceScore\": 0.2707,\n \"id\":
+ 19,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"charge your surface pro 4\"\n },\n
+ \ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
+ []\n }\n }\n ]\n}"
headers:
- apim-request-id: 33ee27bc-abd8-493e-9ffb-ac0259f68ad7
- content-length: '2178'
+ apim-request-id: 7b827f35-7711-46af-8757-f33d0a2ff551
+ content-length: '2154'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTransaction=1
- date: Tue, 21 Sep 2021 21:06:53 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ date: Thu, 30 Sep 2021 15:52:38 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '338'
+ x-envoy-upstream-service-time: '571'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text.yaml
index 31912fc7ddc9..b1a0f0a77e70 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text.yaml
@@ -93,7 +93,7 @@ interactions:
USB 3.1 Always-On, USB 3.1, Headphone/Mic, microSD RAM16GB RAM Upgradable to16GB
Size12.1 x 8.3 x .57 inches Touchpad Size4.3 x 2.2 inches USB Ports3 Video Memory2GB
Warranty/Supportone-year warranty. Weight2.8 pounds Wi-Fi802.11ac Wi-Fi ModelIntel
- Wireless-AC 9560 "}], "language": "en", "stringIndexType": "TextElements_v8"}'
+ Wireless-AC 9560 "}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
@@ -102,7 +102,7 @@ interactions:
Connection:
- keep-alive
Content-Length:
- - '7447'
+ - '7448'
Content-Type:
- application/json
User-Agent:
@@ -119,7 +119,7 @@ interactions:
Battery Life - Longest Lasting Laptop Batteries Competing laptops like the
ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53)
outstayed the 4K Envy 13 but powered down long before the 1080p version.\",\n
- \ \"confidenceScore\": 0.017458289861679077,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.01745828054845333,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"Battery Life\",\n \"confidenceScore\":
0.26247412,\n \"offset\": 0,\n \"length\": 12\n },\n \"offset\":
1779,\n \"length\": 555\n },\n {\n \"answer\": \"Along with
@@ -127,36 +127,36 @@ interactions:
ultraportable chassis, fast performance, and powerful speakers. Best of all,
the Envy 13 starts at a reasonable $799, which is hundreds less than the competition.
In many ways, the Envy 13 is what we wanted the new MacBook Air to be.\",\n
- \ \"confidenceScore\": 0.009401722811162472,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.00940172653645277,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"battery life\",\n \"confidenceScore\":
- 0.3530523,\n \"offset\": 27,\n \"length\": 13\n },\n \"offset\":
+ 0.35305238,\n \"offset\": 27,\n \"length\": 13\n },\n \"offset\":
4508,\n \"length\": 319\n },\n {\n \"answer\": \"We also recommend
the Samsung Notebook 9 Pro, which has a similarly premium design but much
better battery life than the 4K Envy. The Microsoft Surface Laptop 2 is another
recommended alternative, though you might want to wait a few months for the
rumored Surface Laptop 3. Overall, the HP Envy 13 is a fantastic laptop
that checks all the right boxes --- as long as you buy the 1080p model.\",\n
- \ \"confidenceScore\": 0.0070572528056800365,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.007057250943034887,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"battery life\",\n \"confidenceScore\":
- 0.5914322,\n \"offset\": 98,\n \"length\": 13\n },\n \"offset\":
+ 0.59143245,\n \"offset\": 98,\n \"length\": 13\n },\n \"offset\":
5391,\n \"length\": 393\n }\n ]\n}"
headers:
apim-request-id:
- - 8606d2ff-3166-4f3d-bc8c-b89815b47dfa
+ - abf593a7-19a1-45a9-b94c-e590f8f3bf5a
content-length:
- - '2147'
+ - '2146'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=9
date:
- - Tue, 21 Sep 2021 21:06:55 GMT
+ - Thu, 30 Sep 2021 15:52:33 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '733'
+ - '323'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_llc.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_llc.yaml
index 17cc1c469747..3a55e7c933ba 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_llc.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_llc.yaml
@@ -119,7 +119,7 @@ interactions:
Battery Life - Longest Lasting Laptop Batteries Competing laptops like the
ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53)
outstayed the 4K Envy 13 but powered down long before the 1080p version.\",\n
- \ \"confidenceScore\": 0.017458289861679077,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.01745828054845333,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"Battery Life\",\n \"confidenceScore\":
0.26247412,\n \"offset\": 0,\n \"length\": 12\n },\n \"offset\":
1779,\n \"length\": 555\n },\n {\n \"answer\": \"Along with
@@ -127,9 +127,9 @@ interactions:
ultraportable chassis, fast performance, and powerful speakers. Best of all,
the Envy 13 starts at a reasonable $799, which is hundreds less than the competition.
In many ways, the Envy 13 is what we wanted the new MacBook Air to be.\",\n
- \ \"confidenceScore\": 0.00940172653645277,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.009401722811162472,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"battery life\",\n \"confidenceScore\":
- 0.35305238,\n \"offset\": 27,\n \"length\": 13\n },\n \"offset\":
+ 0.3530523,\n \"offset\": 27,\n \"length\": 13\n },\n \"offset\":
4508,\n \"length\": 319\n },\n {\n \"answer\": \"We also recommend
the Samsung Notebook 9 Pro, which has a similarly premium design but much
better battery life than the 4K Envy. The Microsoft Surface Laptop 2 is another
@@ -142,21 +142,21 @@ interactions:
5391,\n \"length\": 393\n }\n ]\n}"
headers:
apim-request-id:
- - 77da498d-0205-4cdb-a4ea-034ac73ea176
+ - 63f09596-dbf2-44ac-a8f9-c3de2917c426
content-length:
- - '2146'
+ - '2145'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=9
date:
- - Tue, 21 Sep 2021 21:06:56 GMT
+ - Thu, 30 Sep 2021 15:52:33 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '474'
+ - '304'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_overload.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_overload.yaml
index e4628e088f8f..fb3f72c08da0 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_overload.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_overload.yaml
@@ -8,7 +8,7 @@ interactions:
your Surface Pro 4 power supply to charge other devices, like a phone, while
your Surface charges. The USB port on the power supply is only for charging,
not for data transfer. If you want to use a USB device, plug it into the USB
- port on your Surface."}], "stringIndexType": "TextElements_v8"}'
+ port on your Surface."}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
@@ -17,7 +17,7 @@ interactions:
Connection:
- keep-alive
Content-Length:
- - '688'
+ - '707'
Content-Type:
- application/json
User-Agent:
@@ -30,39 +30,39 @@ interactions:
It takes two to four hours to charge the Surface Pro 4 battery fully from
an empty state. It can take longer if you\u2019re using your Surface for power-intensive
activities like gaming or video streaming while you\u2019re charging it.\",\n
- \ \"confidenceScore\": 0.9298818111419678,\n \"id\": \"0\",\n \"answerSpan\":
+ \ \"confidenceScore\": 0.9298818707466125,\n \"id\": \"0\",\n \"answerSpan\":
{\n \"text\": \"two to four hours\",\n \"confidenceScore\":
0.98579097,\n \"offset\": 28,\n \"length\": 18\n },\n \"offset\":
0,\n \"length\": 245\n },\n {\n \"answer\": \"It takes two
to four hours to charge the Surface Pro 4 battery fully from an empty state.
It can take longer if you\u2019re using your Surface for power-intensive activities
like gaming or video streaming while you\u2019re charging it.\",\n \"confidenceScore\":
- 0.9254359602928162,\n \"id\": \"0\",\n \"answerSpan\": {\n \"text\":
+ 0.9254360198974609,\n \"id\": \"0\",\n \"answerSpan\": {\n \"text\":
\"two to four hours\",\n \"confidenceScore\": 0.98562825,\n \"offset\":
8,\n \"length\": 18\n },\n \"offset\": 20,\n \"length\":
225\n },\n {\n \"answer\": \"It can take longer if you\u2019re
using your Surface for power-intensive activities like gaming or video streaming
while you\u2019re charging it.\",\n \"confidenceScore\": 0.05503516271710396,\n
\ \"id\": \"0\",\n \"answerSpan\": {\n \"text\": \"longer\",\n
- \ \"confidenceScore\": 0.624118,\n \"offset\": 11,\n \"length\":
+ \ \"confidenceScore\": 0.62411773,\n \"offset\": 11,\n \"length\":
7\n },\n \"offset\": 110,\n \"length\": 135\n }\n ]\n}"
headers:
apim-request-id:
- - 16114d30-1c1d-48e1-a023-4d4507a0d97a
+ - f0c97e63-de33-41db-bebd-3a34cc810fad
content-length:
- - '1479'
+ - '1481'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=2
date:
- - Tue, 21 Sep 2021 21:06:57 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '425'
+ - '255'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_dictparams.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_dictparams.yaml
index 4679ebd184a1..72d83dc51e68 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_dictparams.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_dictparams.yaml
@@ -8,7 +8,7 @@ interactions:
your Surface Pro 4 power supply to charge other devices, like a phone, while
your Surface charges. The USB port on the power supply is only for charging,
not for data transfer. If you want to use a USB device, plug it into the USB
- port on your Surface."}], "language": "en"}'
+ port on your Surface."}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
@@ -17,7 +17,7 @@ interactions:
Connection:
- keep-alive
Content-Length:
- - '668'
+ - '707'
Content-Type:
- application/json
User-Agent:
@@ -30,39 +30,39 @@ interactions:
It takes two to four hours to charge the Surface Pro 4 battery fully from
an empty state. It can take longer if you\u2019re using your Surface for power-intensive
activities like gaming or video streaming while you\u2019re charging it.\",\n
- \ \"confidenceScore\": 0.9298818111419678,\n \"id\": \"1\",\n \"answerSpan\":
+ \ \"confidenceScore\": 0.9298818707466125,\n \"id\": \"1\",\n \"answerSpan\":
{\n \"text\": \"two to four hours\",\n \"confidenceScore\":
0.98579097,\n \"offset\": 28,\n \"length\": 18\n },\n \"offset\":
0,\n \"length\": 245\n },\n {\n \"answer\": \"It takes two
to four hours to charge the Surface Pro 4 battery fully from an empty state.
It can take longer if you\u2019re using your Surface for power-intensive activities
like gaming or video streaming while you\u2019re charging it.\",\n \"confidenceScore\":
- 0.9254360198974609,\n \"id\": \"1\",\n \"answerSpan\": {\n \"text\":
- \"two to four hours\",\n \"confidenceScore\": 0.98562825,\n \"offset\":
+ 0.9254359602928162,\n \"id\": \"1\",\n \"answerSpan\": {\n \"text\":
+ \"two to four hours\",\n \"confidenceScore\": 0.9856282,\n \"offset\":
8,\n \"length\": 18\n },\n \"offset\": 20,\n \"length\":
225\n },\n {\n \"answer\": \"It can take longer if you\u2019re
using your Surface for power-intensive activities like gaming or video streaming
- while you\u2019re charging it.\",\n \"confidenceScore\": 0.05503516271710396,\n
+ while you\u2019re charging it.\",\n \"confidenceScore\": 0.05503518134355545,\n
\ \"id\": \"1\",\n \"answerSpan\": {\n \"text\": \"longer\",\n
\ \"confidenceScore\": 0.624118,\n \"offset\": 11,\n \"length\":
7\n },\n \"offset\": 110,\n \"length\": 135\n }\n ]\n}"
headers:
apim-request-id:
- - 6be5c655-0bad-49d5-ba56-38ba2a890866
+ - 00eefdf8-2d1a-4d4b-9d64-6d28ebc6b0a5
content-length:
- - '1479'
+ - '1478'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=2
date:
- - Tue, 21 Sep 2021 21:06:57 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '271'
+ - '203'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_str_records.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_str_records.yaml
index 60c095f1d278..906db8907bfc 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_str_records.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text.test_query_text_with_str_records.yaml
@@ -8,7 +8,7 @@ interactions:
your Surface Pro 4 power supply to charge other devices, like a phone, while
your Surface charges. The USB port on the power supply is only for charging,
not for data transfer. If you want to use a USB device, plug it into the USB
- port on your Surface."}], "language": "en"}'
+ port on your Surface."}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
@@ -17,7 +17,7 @@ interactions:
Connection:
- keep-alive
Content-Length:
- - '668'
+ - '707'
Content-Type:
- application/json
User-Agent:
@@ -38,7 +38,7 @@ interactions:
It can take longer if you\u2019re using your Surface for power-intensive activities
like gaming or video streaming while you\u2019re charging it.\",\n \"confidenceScore\":
0.9254359602928162,\n \"id\": \"0\",\n \"answerSpan\": {\n \"text\":
- \"two to four hours\",\n \"confidenceScore\": 0.98562825,\n \"offset\":
+ \"two to four hours\",\n \"confidenceScore\": 0.9856282,\n \"offset\":
8,\n \"length\": 18\n },\n \"offset\": 20,\n \"length\":
225\n },\n {\n \"answer\": \"It can take longer if you\u2019re
using your Surface for power-intensive activities like gaming or video streaming
@@ -48,21 +48,21 @@ interactions:
7\n },\n \"offset\": 110,\n \"length\": 135\n }\n ]\n}"
headers:
apim-request-id:
- - 49bbe0b7-74a9-416e-9a82-fffdcca416fd
+ - 390cb02b-4245-4719-8556-59acc1863554
content-length:
- - '1479'
+ - '1478'
content-type:
- application/json; charset=utf-8
csp-billing-usage:
- - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
+ - CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=2
date:
- - Tue, 21 Sep 2021 21:06:58 GMT
+ - Thu, 30 Sep 2021 15:52:34 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '314'
+ - '243'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text.yaml
index 15245c82e840..33d966c39351 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text.yaml
@@ -93,12 +93,12 @@ interactions:
USB 3.1 Always-On, USB 3.1, Headphone/Mic, microSD RAM16GB RAM Upgradable to16GB
Size12.1 x 8.3 x .57 inches Touchpad Size4.3 x 2.2 inches USB Ports3 Video Memory2GB
Warranty/Supportone-year warranty. Weight2.8 pounds Wi-Fi802.11ac Wi-Fi ModelIntel
- Wireless-AC 9560 "}], "language": "en", "stringIndexType": "TextElements_v8"}'
+ Wireless-AC 9560 "}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
Content-Length:
- - '7447'
+ - '7448'
Content-Type:
- application/json
User-Agent:
@@ -115,9 +115,9 @@ interactions:
Battery Life - Longest Lasting Laptop Batteries Competing laptops like the
ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53)
outstayed the 4K Envy 13 but powered down long before the 1080p version.\",\n
- \ \"confidenceScore\": 0.017458291724324226,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.01745828054845333,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"Battery Life\",\n \"confidenceScore\":
- 0.26247388,\n \"offset\": 0,\n \"length\": 12\n },\n \"offset\":
+ 0.26247412,\n \"offset\": 0,\n \"length\": 12\n },\n \"offset\":
1779,\n \"length\": 555\n },\n {\n \"answer\": \"Along with
11-plus hours of battery life (on the FHD model), the Envy 13 has a sleek,
ultraportable chassis, fast performance, and powerful speakers. Best of all,
@@ -132,19 +132,19 @@ interactions:
recommended alternative, though you might want to wait a few months for the
rumored Surface Laptop 3. Overall, the HP Envy 13 is a fantastic laptop
that checks all the right boxes --- as long as you buy the 1080p model.\",\n
- \ \"confidenceScore\": 0.0070572528056800365,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.007057250943034887,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"battery life\",\n \"confidenceScore\":
- 0.5914322,\n \"offset\": 98,\n \"length\": 13\n },\n \"offset\":
+ 0.59143245,\n \"offset\": 98,\n \"length\": 13\n },\n \"offset\":
5391,\n \"length\": 393\n }\n ]\n}"
headers:
- apim-request-id: 6509de25-150a-4b5d-852b-deec6ec8cf2c
- content-length: '2147'
+ apim-request-id: 51ce1817-cad9-4c5e-a2b3-4391b42b3211
+ content-length: '2146'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
- date: Tue, 21 Sep 2021 21:07:00 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=9
+ date: Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '217'
+ x-envoy-upstream-service-time: '292'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_llc.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_llc.yaml
index cd71362f8393..0d9cdddeb235 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_llc.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_llc.yaml
@@ -115,9 +115,9 @@ interactions:
Battery Life - Longest Lasting Laptop Batteries Competing laptops like the
ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53)
outstayed the 4K Envy 13 but powered down long before the 1080p version.\",\n
- \ \"confidenceScore\": 0.01745828054845333,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.01745828241109848,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"Battery Life\",\n \"confidenceScore\":
- 0.26247412,\n \"offset\": 0,\n \"length\": 12\n },\n \"offset\":
+ 0.26247388,\n \"offset\": 0,\n \"length\": 12\n },\n \"offset\":
1779,\n \"length\": 555\n },\n {\n \"answer\": \"Along with
11-plus hours of battery life (on the FHD model), the Envy 13 has a sleek,
ultraportable chassis, fast performance, and powerful speakers. Best of all,
@@ -132,19 +132,19 @@ interactions:
recommended alternative, though you might want to wait a few months for the
rumored Surface Laptop 3. Overall, the HP Envy 13 is a fantastic laptop
that checks all the right boxes --- as long as you buy the 1080p model.\",\n
- \ \"confidenceScore\": 0.0070572528056800365,\n \"id\": \"doc3\",\n
+ \ \"confidenceScore\": 0.007057250943034887,\n \"id\": \"doc3\",\n
\ \"answerSpan\": {\n \"text\": \"battery life\",\n \"confidenceScore\":
0.59143245,\n \"offset\": 98,\n \"length\": 13\n },\n \"offset\":
5391,\n \"length\": 393\n }\n ]\n}"
headers:
- apim-request-id: f3ab3c91-1bfb-4c45-a752-ca79c3e0a987
- content-length: '2147'
+ apim-request-id: bc9b76a5-d8f4-401f-a0d0-1e55341a56d0
+ content-length: '2146'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
- date: Tue, 21 Sep 2021 21:07:00 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=9
+ date: Thu, 30 Sep 2021 15:52:36 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '318'
+ x-envoy-upstream-service-time: '186'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_overload.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_overload.yaml
index 15d4d99520d4..c606f953a9d7 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_overload.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_overload.yaml
@@ -8,12 +8,12 @@ interactions:
your Surface Pro 4 power supply to charge other devices, like a phone, while
your Surface charges. The USB port on the power supply is only for charging,
not for data transfer. If you want to use a USB device, plug it into the USB
- port on your Surface."}], "stringIndexType": "TextElements_v8"}'
+ port on your Surface."}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
Content-Length:
- - '688'
+ - '707'
Content-Type:
- application/json
User-Agent:
@@ -26,7 +26,7 @@ interactions:
It takes two to four hours to charge the Surface Pro 4 battery fully from
an empty state. It can take longer if you\u2019re using your Surface for power-intensive
activities like gaming or video streaming while you\u2019re charging it.\",\n
- \ \"confidenceScore\": 0.9298818111419678,\n \"id\": \"0\",\n \"answerSpan\":
+ \ \"confidenceScore\": 0.9298818707466125,\n \"id\": \"0\",\n \"answerSpan\":
{\n \"text\": \"two to four hours\",\n \"confidenceScore\":
0.98579097,\n \"offset\": 28,\n \"length\": 18\n },\n \"offset\":
0,\n \"length\": 245\n },\n {\n \"answer\": \"It takes two
@@ -43,14 +43,14 @@ interactions:
\ \"confidenceScore\": 0.62411773,\n \"offset\": 11,\n \"length\":
7\n },\n \"offset\": 110,\n \"length\": 135\n }\n ]\n}"
headers:
- apim-request-id: 5f3c0c70-7d76-41fe-84fe-39b61be4f30c
+ apim-request-id: cbb90985-1d4b-4b29-a55e-e2a6bb2ff93b
content-length: '1481'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
- date: Tue, 21 Sep 2021 21:07:01 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=2
+ date: Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '210'
+ x-envoy-upstream-service-time: '175'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_dictparams.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_dictparams.yaml
index 99885d8fbc99..4a8803314774 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_dictparams.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_dictparams.yaml
@@ -8,12 +8,12 @@ interactions:
your Surface Pro 4 power supply to charge other devices, like a phone, while
your Surface charges. The USB port on the power supply is only for charging,
not for data transfer. If you want to use a USB device, plug it into the USB
- port on your Surface."}], "language": "en"}'
+ port on your Surface."}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
Content-Length:
- - '668'
+ - '707'
Content-Type:
- application/json
User-Agent:
@@ -26,31 +26,31 @@ interactions:
It takes two to four hours to charge the Surface Pro 4 battery fully from
an empty state. It can take longer if you\u2019re using your Surface for power-intensive
activities like gaming or video streaming while you\u2019re charging it.\",\n
- \ \"confidenceScore\": 0.9298818111419678,\n \"id\": \"1\",\n \"answerSpan\":
+ \ \"confidenceScore\": 0.9298818707466125,\n \"id\": \"1\",\n \"answerSpan\":
{\n \"text\": \"two to four hours\",\n \"confidenceScore\":
0.98579097,\n \"offset\": 28,\n \"length\": 18\n },\n \"offset\":
0,\n \"length\": 245\n },\n {\n \"answer\": \"It takes two
to four hours to charge the Surface Pro 4 battery fully from an empty state.
It can take longer if you\u2019re using your Surface for power-intensive activities
like gaming or video streaming while you\u2019re charging it.\",\n \"confidenceScore\":
- 0.9254359006881714,\n \"id\": \"1\",\n \"answerSpan\": {\n \"text\":
+ 0.9254359602928162,\n \"id\": \"1\",\n \"answerSpan\": {\n \"text\":
\"two to four hours\",\n \"confidenceScore\": 0.9856282,\n \"offset\":
8,\n \"length\": 18\n },\n \"offset\": 20,\n \"length\":
225\n },\n {\n \"answer\": \"It can take longer if you\u2019re
using your Surface for power-intensive activities like gaming or video streaming
- while you\u2019re charging it.\",\n \"confidenceScore\": 0.055035144090652466,\n
+ while you\u2019re charging it.\",\n \"confidenceScore\": 0.05503518134355545,\n
\ \"id\": \"1\",\n \"answerSpan\": {\n \"text\": \"longer\",\n
- \ \"confidenceScore\": 0.62411773,\n \"offset\": 11,\n \"length\":
+ \ \"confidenceScore\": 0.624118,\n \"offset\": 11,\n \"length\":
7\n },\n \"offset\": 110,\n \"length\": 135\n }\n ]\n}"
headers:
- apim-request-id: 75d29f93-387d-42b6-92e9-d454c3b09683
- content-length: '1481'
+ apim-request-id: 5bd3db5f-1a20-4d42-8720-772e5f694ccd
+ content-length: '1478'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
- date: Tue, 21 Sep 2021 21:07:01 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=2
+ date: Thu, 30 Sep 2021 15:52:36 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '228'
+ x-envoy-upstream-service-time: '633'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_str_records.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_str_records.yaml
index a80212f864c3..4b1b34ebaadc 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_str_records.yaml
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_text_async.test_query_text_with_str_records.yaml
@@ -8,12 +8,12 @@ interactions:
your Surface Pro 4 power supply to charge other devices, like a phone, while
your Surface charges. The USB port on the power supply is only for charging,
not for data transfer. If you want to use a USB device, plug it into the USB
- port on your Surface."}], "language": "en"}'
+ port on your Surface."}], "language": "en", "stringIndexType": "UnicodeCodePoint"}'
headers:
Accept:
- application/json
Content-Length:
- - '668'
+ - '707'
Content-Type:
- application/json
User-Agent:
@@ -26,31 +26,31 @@ interactions:
It takes two to four hours to charge the Surface Pro 4 battery fully from
an empty state. It can take longer if you\u2019re using your Surface for power-intensive
activities like gaming or video streaming while you\u2019re charging it.\",\n
- \ \"confidenceScore\": 0.9298818111419678,\n \"id\": \"0\",\n \"answerSpan\":
+ \ \"confidenceScore\": 0.9298818707466125,\n \"id\": \"0\",\n \"answerSpan\":
{\n \"text\": \"two to four hours\",\n \"confidenceScore\":
0.98579097,\n \"offset\": 28,\n \"length\": 18\n },\n \"offset\":
0,\n \"length\": 245\n },\n {\n \"answer\": \"It takes two
to four hours to charge the Surface Pro 4 battery fully from an empty state.
It can take longer if you\u2019re using your Surface for power-intensive activities
like gaming or video streaming while you\u2019re charging it.\",\n \"confidenceScore\":
- 0.9254359602928162,\n \"id\": \"0\",\n \"answerSpan\": {\n \"text\":
+ 0.9254360198974609,\n \"id\": \"0\",\n \"answerSpan\": {\n \"text\":
\"two to four hours\",\n \"confidenceScore\": 0.98562825,\n \"offset\":
8,\n \"length\": 18\n },\n \"offset\": 20,\n \"length\":
225\n },\n {\n \"answer\": \"It can take longer if you\u2019re
using your Surface for power-intensive activities like gaming or video streaming
- while you\u2019re charging it.\",\n \"confidenceScore\": 0.055035144090652466,\n
+ while you\u2019re charging it.\",\n \"confidenceScore\": 0.05503518134355545,\n
\ \"id\": \"0\",\n \"answerSpan\": {\n \"text\": \"longer\",\n
- \ \"confidenceScore\": 0.62411773,\n \"offset\": 11,\n \"length\":
+ \ \"confidenceScore\": 0.624118,\n \"offset\": 11,\n \"length\":
7\n },\n \"offset\": 110,\n \"length\": 135\n }\n ]\n}"
headers:
- apim-request-id: 829dce06-09a4-4da9-9118-91a11be1b3e1
- content-length: '1482'
+ apim-request-id: 487b1ad3-a4b7-43d6-86aa-0b04254b2eef
+ content-length: '1479'
content-type: application/json; charset=utf-8
- csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=1
- date: Tue, 21 Sep 2021 21:07:02 GMT
+ csp-billing-usage: CognitiveServices.TextAnalytics.QuestionAnsweringTextRecords=2
+ date: Thu, 30 Sep 2021 15:52:35 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '178'
+ x-envoy-upstream-service-time: '199'
status:
code: 200
message: OK
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py
index dd279adccf87..86b66635e210 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py
@@ -4,7 +4,7 @@
# Licensed under the MIT License.
# ------------------------------------
import os
-
+import pytest
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
@@ -16,9 +16,12 @@
from azure.ai.language.questionanswering import QuestionAnsweringClient
from azure.ai.language.questionanswering.operations._operations import build_query_text_request, build_query_knowledge_base_request
from azure.ai.language.questionanswering.models import (
- KnowledgeBaseQueryOptions,
+ QueryKnowledgeBaseOptions,
KnowledgeBaseAnswerRequestContext,
AnswerSpanRequest,
+ MetadataFilter,
+ LogicalOperationKind,
+ QueryFilters,
)
@@ -125,7 +128,7 @@ def test_query_knowledgebase_llc_with_answerspan(self, qna_account, qna_key, qna
@GlobalQuestionAnsweringAccountPreparer()
def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="Ports and connectors",
top=3,
context=KnowledgeBaseAnswerRequestContext(
@@ -166,7 +169,7 @@ def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
@GlobalQuestionAnsweringAccountPreparer()
def test_query_knowledgebase_with_answerspan(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="Ports and connectors",
top=3,
context=KnowledgeBaseAnswerRequestContext(
@@ -238,7 +241,7 @@ def test_query_knowledgebase_with_dictparams(self, qna_account, qna_key, qna_pro
deployment_name='test'
)
- assert len(output.answers) == 3
+ assert len(output.answers) == 2
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
@@ -262,7 +265,7 @@ def test_query_knowledgebase_overload(self, qna_account, qna_key, qna_project):
include_unstructured_sources=True
)
- assert len(output.answers) == 3
+ assert len(output.answers) == 2
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
@@ -271,7 +274,7 @@ def test_query_knowledgebase_overload(self, qna_account, qna_key, qna_project):
def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
with client:
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?",
top=3,
user_id="sd53lsY=",
@@ -293,7 +296,7 @@ def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_proje
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="How long it takes to charge Surface?",
top=3,
user_id="sd53lsY=",
@@ -316,16 +319,16 @@ def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_proje
)
assert len(output.answers) == 2
- confident_answers = [a for a in output.answers if a.confidence_score > 0.6]
+ confident_answers = [a for a in output.answers if a.confidence_score > 0.5]
assert len(confident_answers) == 1
- assert confident_answers[0].answer_span.text == "two to four hours"
+ assert confident_answers[0].answer_span.text == " two to four hours"
@GlobalQuestionAnsweringAccountPreparer()
def test_query_knowledgebase_only_id(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
with client:
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
qna_id=19
)
@@ -349,4 +352,56 @@ def test_query_knowledgebase_python_dict(self, qna_account, qna_key, qna_project
deployment_name='test'
)
- assert len(output.answers) == 1
\ No newline at end of file
+ assert len(output.answers) == 1
+
+ def test_query_knowledgebase_overload_positional_and_kwarg(self):
+ with QuestionAnsweringClient("http://fake.com", AzureKeyCredential("123")) as client:
+ with pytest.raises(TypeError):
+ client.query_knowledge_base("positional_one", "positional_two")
+ with pytest.raises(TypeError):
+ client.query_knowledge_base("positional_options_bag", options="options bag by name")
+
+ def test_query_knowledgebase_question_or_qna_id(self):
+ with QuestionAnsweringClient("http://fake.com", AzureKeyCredential("123")) as client:
+
+ options = QueryKnowledgeBaseOptions()
+ with pytest.raises(TypeError):
+ client.query_knowledge_base(
+ options,
+ project_name="hello",
+ deployment_name='test'
+ )
+
+ with pytest.raises(TypeError):
+ client.query_knowledge_base(
+ project_name="hello",
+ deployment_name='test'
+ )
+
+ @GlobalQuestionAnsweringAccountPreparer()
+ def test_query_knowledgebase_filter(self, qna_account, qna_key, qna_project):
+ """Thanks to @heaths for this test!"""
+ filters = QueryFilters(
+ metadata_filter=MetadataFilter(
+ metadata=[
+ ("explicitlytaggedheading", "check the battery level"),
+ ("explicitlytaggedheading", "make your battery last")
+ ],
+ ),
+ logical_operation=LogicalOperationKind.OR_ENUM
+ )
+ with QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key)) as client:
+ response = client.query_knowledge_base(
+ project_name=qna_project,
+ deployment_name='test',
+ question="Battery life",
+ filters=filters,
+ top=3,
+ )
+ assert len(response.answers) == 3
+ assert any(
+ [a for a in response.answers if a.metadata.get('explicitlytaggedheading') == "check the battery level"]
+ )
+ assert any(
+ [a for a in response.answers if a.metadata.get('explicitlytaggedheading') == "make your battery last"]
+ )
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py
index fda1d6cb265d..e90f684f1be3 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py
@@ -16,9 +16,12 @@
from asynctestcase import AsyncQuestionAnsweringTest
from azure.ai.language.questionanswering.models import (
- KnowledgeBaseQueryOptions,
+ QueryKnowledgeBaseOptions,
KnowledgeBaseAnswerRequestContext,
AnswerSpanRequest,
+ QueryFilters,
+ MetadataFilter,
+ LogicalOperationKind,
)
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering.operations._operations import build_query_knowledge_base_request, build_query_text_request
@@ -127,7 +130,7 @@ async def test_query_knowledgebase_llc_with_answerspan(self, qna_account, qna_ke
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="Ports and connectors",
top=3,
context=KnowledgeBaseAnswerRequestContext(
@@ -168,7 +171,7 @@ async def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_knowledgebase_with_answerspan(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="Ports and connectors",
top=3,
context=KnowledgeBaseAnswerRequestContext(
@@ -239,7 +242,7 @@ async def test_query_knowledgebase_with_dictparams(self, qna_account, qna_key, q
deployment_name='test'
)
- assert len(output.answers) == 3
+ assert len(output.answers) == 2
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
@@ -263,7 +266,7 @@ async def test_query_knowledgebase_overload(self, qna_account, qna_key, qna_proj
include_unstructured_sources=True
)
- assert len(output.answers) == 3
+ assert len(output.answers) == 2
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
@@ -272,7 +275,7 @@ async def test_query_knowledgebase_overload(self, qna_account, qna_key, qna_proj
async def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
async with client:
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?",
top=3,
user_id="sd53lsY=",
@@ -294,7 +297,7 @@ async def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
- query_params = KnowledgeBaseQueryOptions(
+ query_params = QueryKnowledgeBaseOptions(
question="How long it takes to charge Surface?",
top=3,
user_id="sd53lsY=",
@@ -317,9 +320,9 @@ async def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna
)
assert len(output.answers) == 2
- confident_answers = [a for a in output.answers if a.confidence_score > 0.6]
+ confident_answers = [a for a in output.answers if a.confidence_score > 0.5]
assert len(confident_answers) == 1
- assert confident_answers[0].answer_span.text == "two to four hours"
+ assert confident_answers[0].answer_span.text == " two to four hours"
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_knowledgebase_only_id(self, qna_account, qna_key, qna_project):
@@ -348,3 +351,55 @@ async def test_query_knowledgebase_python_dict(self, qna_account, qna_key, qna_p
)
assert len(output.answers) == 1
+
+ async def test_query_knowledgebase_overload_positional_and_kwarg(self):
+ async with QuestionAnsweringClient("http://fake.com", AzureKeyCredential("123")) as client:
+ with pytest.raises(TypeError):
+ await client.query_knowledge_base("positional_one", "positional_two")
+ with pytest.raises(TypeError):
+ await client.query_knowledge_base("positional_options_bag", options="options bag by name")
+
+ async def test_query_knowledgebase_question_or_qna_id(self):
+ async with QuestionAnsweringClient("http://fake.com", AzureKeyCredential("123")) as client:
+
+ options = QueryKnowledgeBaseOptions()
+ with pytest.raises(TypeError):
+ await client.query_knowledge_base(
+ options,
+ project_name="hello",
+ deployment_name='test'
+ )
+
+ with pytest.raises(TypeError):
+ await client.query_knowledge_base(
+ project_name="hello",
+ deployment_name='test'
+ )
+
+ @GlobalQuestionAnsweringAccountPreparer()
+ async def test_query_knowledgebase_filter(self, qna_account, qna_key, qna_project):
+ """Thanks to @heaths for this test!"""
+ filters = QueryFilters(
+ metadata_filter=MetadataFilter(
+ metadata=[
+ ("explicitlytaggedheading", "check the battery level"),
+ ("explicitlytaggedheading", "make your battery last")
+ ],
+ ),
+ logical_operation=LogicalOperationKind.OR_ENUM
+ )
+ async with QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key)) as client:
+ response = await client.query_knowledge_base(
+ project_name=qna_project,
+ deployment_name='test',
+ question="Battery life",
+ filters=filters,
+ top=3,
+ )
+ assert len(response.answers) == 3
+ assert any(
+ [a for a in response.answers if a.metadata.get('explicitlytaggedheading') == "check the battery level"]
+ )
+ assert any(
+ [a for a in response.answers if a.metadata.get('explicitlytaggedheading') == "make your battery last"]
+ )
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py
index db55ce2867c1..434141a88a2d 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py
@@ -17,7 +17,7 @@
from azure.ai.language.questionanswering import QuestionAnsweringClient
from azure.ai.language.questionanswering.operations._operations import build_query_text_request, build_query_knowledge_base_request
from azure.ai.language.questionanswering.models import (
- TextQueryOptions,
+ QueryTextOptions,
TextRecord
)
@@ -67,7 +67,7 @@ def test_query_text_llc(self, qna_account, qna_key):
@GlobalQuestionAnsweringAccountPreparer()
def test_query_text(self, qna_account, qna_key):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- params = TextQueryOptions(
+ params = QueryTextOptions(
question="What is the meaning of life?",
records=[
TextRecord(
@@ -180,3 +180,10 @@ def test_query_text_overload(self, qna_account, qna_key):
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 2
assert confident_answers[0].answer_span.text == "two to four hours"
+
+ def test_query_text_overload_positional_and_kwarg(self):
+ with QuestionAnsweringClient("http://fake.com", AzureKeyCredential("123")) as client:
+ with pytest.raises(TypeError):
+ client.query_text("positional_one", "positional_two")
+ with pytest.raises(TypeError):
+ client.query_text("positional_options_bag", options="options bag by name")
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
index a4fb8d110dfc..b5d6323a64db 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
@@ -17,7 +17,7 @@
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering.operations._operations import build_query_text_request, build_query_knowledge_base_request
from azure.ai.language.questionanswering.models import (
- TextQueryOptions,
+ QueryTextOptions,
TextRecord
)
@@ -69,7 +69,7 @@ async def test_query_text_llc(self, qna_account, qna_key):
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_text(self, qna_account, qna_key):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- params = TextQueryOptions(
+ params = QueryTextOptions(
question="What is the meaning of life?",
records=[
TextRecord(
@@ -181,3 +181,10 @@ async def test_query_text_overload(self, qna_account, qna_key):
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 2
assert confident_answers[0].answer_span.text == "two to four hours"
+
+ async def test_query_text_overload_positional_and_kwarg(self):
+ async with QuestionAnsweringClient("http://fake.com", AzureKeyCredential("123")) as client:
+ with pytest.raises(TypeError):
+ await client.query_text("positional_one", "positional_two")
+ with pytest.raises(TypeError):
+ await client.query_text("positional_options_bag", options="options bag by name")
diff --git a/sdk/cognitivelanguage/tests.yml b/sdk/cognitivelanguage/tests.yml
index 77ea573acbbc..cbe99bfbe34a 100644
--- a/sdk/cognitivelanguage/tests.yml
+++ b/sdk/cognitivelanguage/tests.yml
@@ -1,33 +1,10 @@
-trigger:
- branches:
- include:
- - master
- - main
- - hotfix/*
- - release/*
- - restapi*
- paths:
- include:
- - sdk/cognitivelanguage/
- - scripts/
-
-pr:
- branches:
- include:
- - master
- - main
- - feature/*
- - hotfix/*
- - release/*
- - restapi*
- paths:
- include:
- - sdk/cognitivelanguage/
+trigger: none
stages:
- template: ../../eng/pipelines/templates/stages/archetype-sdk-tests.yml
parameters:
AllocateResourceGroup: false
+ BuildTargetingString: azure*
ServiceDirectory: cognitivelanguage
MatrixReplace:
- TestSamples=.*/true
@@ -35,9 +12,13 @@ stages:
AZURE_QUESTIONANSWERING_KEY: $(qna-key)
AZURE_QUESTIONANSWERING_PROJECT: 190a9e13-8ede-4e4b-a8fd-c4d7f2aeab6c
AZURE_QUESTIONANSWERING_ENDPOINT: $(qna-uri)
+ AZURE_CONVERSATIONS_ENDPOINT: $(conversations-endpoint)
+ AZURE_CONVERSATIONS_KEY: $(conversations-key)
+ AZURE_CONVERSATIONS_PROJECT: $(conversations-project)
+ AZURE_CONVERSATIONS_WORKFLOW_PROJECT: $(conversations-workflow-project)
AZURE_CLIENT_ID: $(aad-azure-sdk-test-client-id)
AZURE_CLIENT_SECRET: $(aad-azure-sdk-test-client-secret)
AZURE_SUBSCRIPTION_ID: $(azure-subscription-id)
AZURE_TENANT_ID: $(aad-azure-sdk-test-tenant-id)
- TEST_MODE: 'RunLiveNoRecord' # use when allowing preparers to create the rgs for you
- AZURE_TEST_RUN_LIVE: 'true' # use when utilizing the New-TestResources Script
+ TEST_MODE: "RunLiveNoRecord" # use when allowing preparers to create the rgs for you
+ AZURE_TEST_RUN_LIVE: "true" # use when utilizing the New-TestResources Script
diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/operations/text_moderation_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/operations/text_moderation_operations.py
index baf744da1366..9254927d46cc 100644
--- a/sdk/cognitiveservices/azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/operations/text_moderation_operations.py
+++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/operations/text_moderation_operations.py
@@ -35,10 +35,10 @@ def __init__(self, client, config, serializer, deserializer):
def screen_text(
self, text_content_type, text_content, language=None, autocorrect=False, pii=False, list_id=None, classify=False, custom_headers=None, raw=False, callback=None, **operation_config):
- """Detect profanity and match against custom and shared blacklists.
+ """Detect profanity and match against custom and shared blocklists.
Detects profanity in more than 100 languages and match against custom
- and shared blacklists.
+ and shared blocklists.
:param text_content_type: The content type. Possible values include:
'text/plain', 'text/html', 'text/xml', 'text/markdown'
diff --git a/sdk/core/azure-core/CHANGELOG.md b/sdk/core/azure-core/CHANGELOG.md
index a657078c52ff..c9111ed9eaa4 100644
--- a/sdk/core/azure-core/CHANGELOG.md
+++ b/sdk/core/azure-core/CHANGELOG.md
@@ -1,5 +1,15 @@
# Release History
+## 1.19.1 (Unreleased)
+
+### Features Added
+
+### Breaking Changes
+
+### Bugs Fixed
+
+### Other Changes
+
## 1.19.0 (2021-09-30)
### Breaking Changes in the Provisional `azure.core.rest` package
diff --git a/sdk/core/azure-core/azure/core/_version.py b/sdk/core/azure-core/azure/core/_version.py
index 7b0e621101b8..91bcd81c2857 100644
--- a/sdk/core/azure-core/azure/core/_version.py
+++ b/sdk/core/azure-core/azure/core/_version.py
@@ -9,4 +9,4 @@
# regenerated.
# --------------------------------------------------------------------------
-VERSION = "1.19.0"
+VERSION = "1.19.1"
diff --git a/sdk/core/azure-core/azure/core/pipeline/policies/_retry.py b/sdk/core/azure-core/azure/core/pipeline/policies/_retry.py
index b683e7e08ab2..5fe7223041e4 100644
--- a/sdk/core/azure-core/azure/core/pipeline/policies/_retry.py
+++ b/sdk/core/azure-core/azure/core/pipeline/policies/_retry.py
@@ -152,14 +152,14 @@ def _is_read_error(self, err):
def _is_method_retryable(self, settings, request, response=None):
"""Checks if a given HTTP method should be retried upon, depending if
- it is included on the method whitelist.
+ it is included on the method allowlist.
:param dict settings: The retry settings.
:param request: The PipelineRequest object.
:type request: ~azure.core.pipeline.PipelineRequest
:param response: The PipelineResponse object.
:type response: ~azure.core.pipeline.PipelineResponse
- :return: True if method should be retried upon. False if not in method whitelist.
+ :return: True if method should be retried upon. False if not in method allowlist.
:rtype: bool
"""
if response and request.method.upper() in ['POST', 'PATCH'] and \
@@ -173,7 +173,7 @@ def _is_method_retryable(self, settings, request, response=None):
def is_retry(self, settings, response):
"""Checks if method/status code is retryable.
- Based on whitelists and control variables such as the number of
+ Based on allowlists and control variables such as the number of
total retries to allow, whether to respect the Retry-After header,
whether this header is present, and whether the returned status
code is on the list of status codes to be retried upon on the
@@ -244,7 +244,7 @@ def increment(self, settings, response=None, error=None):
else:
# Incrementing because of a server error like a 500 in
- # status_forcelist and a the given method is in the whitelist
+ # status_forcelist and the given method is in the allowlist
if response:
settings['status'] -= 1
if hasattr(response, 'http_request') and hasattr(response, 'http_response'):
diff --git a/sdk/cosmos/azure-cosmos/README.md b/sdk/cosmos/azure-cosmos/README.md
index dd6f1428aa06..88a8232cdc0c 100644
--- a/sdk/cosmos/azure-cosmos/README.md
+++ b/sdk/cosmos/azure-cosmos/README.md
@@ -96,33 +96,33 @@ Currently the features below are **not supported**. For alternatives options, ch
### Data Plane Limitations:
-* Group By queries (in roadmap for 2021).
-* Language Native async i/o (in roadmap for 2021).
-* Queries with COUNT from a DISTINCT subquery: SELECT COUNT (1) FROM (SELECT DISTINCT C.ID FROM C).
-* Bulk/Transactional batch processing.
-* Direct TCP Mode access.
-* Continuation token for cross partitions queries.
-* Change Feed: Processor.
-* Change Feed: Read multiple partitions key values.
-* Change Feed: Read specific time.
-* Change Feed: Read from the beggining.
-* Change Feed: Pull model.
-* Cross-partition ORDER BY for mixed types.
+* Group By queries
+* Language Native async i/o
+* Queries with COUNT from a DISTINCT subquery: SELECT COUNT (1) FROM (SELECT DISTINCT C.ID FROM C)
+* Bulk/Transactional batch processing
+* Direct TCP Mode access
+* Continuation token for cross partitions queries
+* Change Feed: Processor
+* Change Feed: Read multiple partitions key values
+* Change Feed: Read specific time
+* Change Feed: Read from the beginning
+* Change Feed: Pull model
+* Cross-partition ORDER BY for mixed types
* Integrated Cache using the default consistency level, that is "Session". To take advantage of the new [Cosmos DB Integrated Cache](https://docs.microsoft.com/azure/cosmos-db/integrated-cache), it is required to explicitly set CosmosClient consistency level to "Eventual": `consistency_level= Eventual`.
### Control Plane Limitations:
-* Get CollectionSizeUsage, DatabaseUsage, and DocumentUsage metrics.
-* Create Geospatial Index.
-* Provision Autoscale DBs or containers.
-* Update Autoscale throughput.
-* Update analytical store ttl (time to live).
-* Get the connection string.
-* Get the minimum RU/s of a container.
+* Get CollectionSizeUsage, DatabaseUsage, and DocumentUsage metrics
+* Create Geospatial Index
+* Provision Autoscale DBs or containers
+* Update Autoscale throughput
+* Update analytical store ttl (time to live)
+* Get the connection string
+* Get the minimum RU/s of a container
### Security Limitations:
-* AAD support.
+* AAD support
## Workarounds
diff --git a/sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/_vendor/storage/blob/_shared/policies.py b/sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/_vendor/storage/blob/_shared/policies.py
index c9bc798d671a..2db5048b67ef 100644
--- a/sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/_vendor/storage/blob/_shared/policies.py
+++ b/sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/_vendor/storage/blob/_shared/policies.py
@@ -75,7 +75,7 @@ def retry_hook(settings, **kwargs):
def is_retry(response, mode):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
@@ -456,7 +456,7 @@ def increment(self, settings, request, response=None, error=None):
else:
# Incrementing because of a server error like a 500 in
- # status_forcelist and a the given method is in the whitelist
+            # status_forcelist and the given method is in the allowlist
if response:
settings['status'] -= 1
settings['history'].append(RequestHistory(request, http_response=response))
diff --git a/sdk/eventhub/azure-eventhub-checkpointstoreblob/azure/eventhub/extensions/checkpointstoreblob/_vendor/storage/blob/_shared/policies.py b/sdk/eventhub/azure-eventhub-checkpointstoreblob/azure/eventhub/extensions/checkpointstoreblob/_vendor/storage/blob/_shared/policies.py
index c9bc798d671a..2db5048b67ef 100644
--- a/sdk/eventhub/azure-eventhub-checkpointstoreblob/azure/eventhub/extensions/checkpointstoreblob/_vendor/storage/blob/_shared/policies.py
+++ b/sdk/eventhub/azure-eventhub-checkpointstoreblob/azure/eventhub/extensions/checkpointstoreblob/_vendor/storage/blob/_shared/policies.py
@@ -75,7 +75,7 @@ def retry_hook(settings, **kwargs):
def is_retry(response, mode):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
@@ -456,7 +456,7 @@ def increment(self, settings, request, response=None, error=None):
else:
# Incrementing because of a server error like a 500 in
- # status_forcelist and a the given method is in the whitelist
+            # status_forcelist and the given method is in the allowlist
if response:
settings['status'] -= 1
settings['history'].append(RequestHistory(request, http_response=response))
diff --git a/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/_policies.py b/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/_policies.py
index ad5045703369..1cb05388280d 100644
--- a/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/_policies.py
+++ b/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/_policies.py
@@ -133,7 +133,7 @@ def __init__(self, **kwargs):
self.retry_to_secondary = kwargs.get('retry_to_secondary', False)
def is_retry(self, settings, response):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
diff --git a/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/aio/_policies_async.py b/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/aio/_policies_async.py
index 96139f7c5b4e..5f23dece3524 100644
--- a/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/aio/_policies_async.py
+++ b/sdk/eventhub/azure-eventhub-checkpointstoretable/azure/eventhub/extensions/checkpointstoretable/_vendor/data/tables/aio/_policies_async.py
@@ -56,7 +56,7 @@ def __init__(self, **kwargs):
self.retry_to_secondary = kwargs.get('retry_to_secondary', False)
def is_retry(self, settings, response):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
diff --git a/sdk/eventhub/test-resources.json b/sdk/eventhub/test-resources.json
index f66ea2ac3756..7ad0213a48b6 100644
--- a/sdk/eventhub/test-resources.json
+++ b/sdk/eventhub/test-resources.json
@@ -59,7 +59,7 @@
"type": "string",
"defaultValue": "10",
"metadata": {
- "description": "The maximum duration, in minutes, that a single test is permitted to run before it is considered at-risk for being hung."
+        "description": "The maximum duration, in minutes, that a single test is permitted to run before it is considered at-risk of not responding."
}
}
},
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md
index f05f6ba48035..77de8118a781 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md
@@ -1,9 +1,11 @@
# Release History
-## 3.2.0b1 (Unreleased)
+## 3.2.0b1 (2021-10-05)
This version of the SDK defaults to the latest supported API version, which currently is v2021-09-30-preview.
+> Note: Starting with version 2021-09-30-preview, a new set of clients were introduced to leverage the newest features of the Form Recognizer service. Please see the [Migration Guide](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md) for detailed instructions on how to update application code from client library version 3.1.X or lower to the latest version. Also, please refer to the [README](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/README.md) for more information about the library.
+
### Features Added
- Added new `DocumentAnalysisClient` with `begin_analyze_document` and `begin_analyze_document_from_url` methods. Use these methods with the latest Form Recognizer
API version to analyze documents, with prebuilt and custom models.
@@ -13,10 +15,6 @@ API version to analyze documents, with prebuilt and custom models.
- Added samples using the `DocumentAnalysisClient` and `DocumentModelAdministrationClient` under `/samples/v3.2-beta`.
- Added `DocumentAnalysisApiVersion` to be used with `DocumentAnalysisClient` and `DocumentModelAdministrationClient`.
-### Breaking Changes
-
-### Bugs Fixed
-
### Other Changes
- Python 3.5 is no longer supported in this release.
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md b/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md
new file mode 100644
index 000000000000..340857f86d71
--- /dev/null
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md
@@ -0,0 +1,651 @@
+# Guide for migrating azure-ai-formrecognizer to version 3.2.x from versions 3.1.x and below
+
+This guide is intended to assist in the migration to `azure-ai-formrecognizer (3.2.x)` from versions `3.1.x` and below. It will focus on side-by-side comparisons for similar operations between versions. Please note that version `3.2.0b1` will be used for comparison with `3.1.2`.
+
+Familiarity with `azure-ai-formrecognizer (3.1.x and below)` package is assumed. For those new to the Azure Form Recognizer client library for Python please refer to the [README][readme] rather than this guide.
+
+## Table of Contents
+- [Migration benefits](#migration-benefits)
+- [Important changes](#important-changes)
+ - [Client usage](#client-usage)
+ - [Analyzing document](#analyzing-documents)
+ - [Analyzing a document with a custom model](#analyzing-a-document-with-a-custom-model)
+ - [Training a custom model](#training-a-custom-model)
+ - [Manage models](#manage-models)
+- [Additional samples](#additional-samples)
+
+## Migration benefits
+
+A natural question to ask when considering whether to adopt a new version of the library is what the benefits of doing so would be. As Azure Form Recognizer has matured and been embraced by a more diverse group of developers, we have been focused on learning the patterns and practices to best support developer productivity and add value to our customers.
+
+There are many benefits to using the new design of the `azure-ai-formrecognizer (3.2.x)` library. This new version of the library introduces two new clients `DocumentAnalysisClient` and the `DocumentModelAdministrationClient` with unified methods for analyzing documents and provides support for the new features added by the service in API version `2021-09-30-preview` and later.
+
+New features provided by the `DocumentAnalysisClient` include one consolidated method for analyzing document layout, a general prebuilt document model type, along with the same prebuilt models that were included previously (receipts, invoices, business cards, identity documents), and custom models. Moreover, the models introduced in the latest version of the library, such as `AnalyzeResult`, remove hierarchical dependencies between document elements and move them to a more top level and easily accessible position. The service has further improved how to define where elements are located on documents by moving towards `BoundingRegion` definitions allowing for cross-page elements. Document element fields are returned with more information, such as content and spans.
+
+When using the `DocumentModelAdministrationClient` to build, compose, or copy models, users can now assign their own model IDs and specify a description. Listing models on the administration client now includes both prebuilt and custom models. When using `get_model()`, users can get the field schema (field names and types that the model can extract) for the model they specified, including for prebuilt models. This client also provides functions for getting information from model operations.
+
+The below table describes the relationship of each client and its supported API version(s):
+
+|API version|Supported clients
+|-|-
+|2021-09-30-preview | DocumentAnalysisClient and DocumentModelAdministrationClient
+|2.1 | FormRecognizerClient and FormTrainingClient
+|2.0 | FormRecognizerClient and FormTrainingClient
+
+Please refer to the [README][readme] for more information on these new clients.
+
+## Important changes
+
+### Client usage
+
+We continue to support API key and AAD authentication methods when creating the clients. Below are the differences between the two versions:
+
+- In `3.2.x`, we have added `DocumentAnalysisClient` and `DocumentModelAdministrationClient` which support API version `2021-09-30-preview` and later.
+- `FormRecognizerClient` and `FormTrainingClient` will raise an error if called with an API version of `2021-09-30-preview` and later.
+- In `DocumentAnalysisClient` all prebuilt model methods along with custom model, layout, and a prebuilt document analysis model are unified into two methods called
+`begin_analyze_document` and `begin_analyze_document_from_url`.
+- In `FormRecognizerClient` there are two methods (a stream and URL method) for each of the prebuilt models supported by the service. This results in two methods for business card, receipt, identity document, and invoice models, along with a pair of methods for recognizing custom documents and for recognizing content/layout.
+
+Creating new clients in `3.1.x`:
+```python
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.formrecognizer import FormRecognizerClient, FormTrainingClient
+
+endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
+key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
+
+form_recognizer_client = FormRecognizerClient(
+ endpoint=endpoint, credential=AzureKeyCredential(key)
+)
+
+form_training_client = FormTrainingClient(
+ endpoint=endpoint, credential=AzureKeyCredential(key)
+)
+```
+
+Creating new clients in `3.2.x`:
+```python
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.formrecognizer import DocumentAnalysisClient, DocumentModelAdministrationClient
+
+endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
+key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
+
+document_analysis_client = DocumentAnalysisClient(
+ endpoint=endpoint, credential=AzureKeyCredential(key)
+)
+
+document_model_admin_client = DocumentModelAdministrationClient(
+ endpoint=endpoint, credential=AzureKeyCredential(key)
+)
+```
+
+### Analyzing documents
+
+Differences between the versions:
+- `begin_analyze_document` and `begin_analyze_document_from_url` accept a string with the desired model ID for analysis. The model ID can be any of the prebuilt model IDs or a custom model ID.
+- Along with more consolidated analysis methods in the `DocumentAnalysisClient`, the return types have also been improved and remove the hierarchical dependencies between elements. An instance of the `AnalyzeResult` model is now returned which showcases important document elements, such as key-value pairs, entities, tables, and document fields and values, among others, at the top level of the returned model. This can be contrasted with `RecognizedForm` which included more hierarchical relationships, for instance tables were an element of a `FormPage` and not a top-level element.
+- In the new version of the library, the functionality of `begin_recognize_content` has been added as a prebuilt model and can be called in library version `azure-ai-formrecognizer (3.2.x)` with `begin_analyze_document` by passing in the `prebuilt-layout` model ID. Similarly, to get general prebuilt document information, such as key-value pairs, entities, and text layout, the `prebuilt-document` model ID can be used with `begin_analyze_document`.
+- When calling `begin_analyze_document` and `begin_analyze_document_from_url` the returned type is an `AnalyzeResult` object, while the various methods used with `FormRecognizerClient` return a list of `RecognizedForm`.
+- The `pages` keyword argument is a string with library version `azure-ai-formrecognizer (3.2.x)`. In `azure-ai-formrecognizer (3.1.x)`, `pages` was a list of strings.
+- The `include_field_elements` keyword argument is not supported with the `DocumentAnalysisClient`, text details are automatically included with API version `2021-09-30-preview` and later.
+- The `reading_order` keyword argument does not exist on `begin_analyze_document` and `begin_analyze_document_from_url`. The service uses `natural` reading order to return data.
+
+Analyzing prebuilt models like business cards, identity documents, invoices, and receipts with `3.1.x`:
+```python
+with open(path_to_sample_forms, "rb") as f:
+ poller = form_recognizer_client.begin_recognize_receipts(receipt=f, locale="en-US")
+receipts = poller.result()
+
+for idx, receipt in enumerate(receipts):
+ print("--------Recognizing receipt #{}--------".format(idx+1))
+ receipt_type = receipt.fields.get("ReceiptType")
+ if receipt_type:
+ print("Receipt Type: {} has confidence: {}".format(receipt_type.value, receipt_type.confidence))
+ merchant_name = receipt.fields.get("MerchantName")
+ if merchant_name:
+ print("Merchant Name: {} has confidence: {}".format(merchant_name.value, merchant_name.confidence))
+ transaction_date = receipt.fields.get("TransactionDate")
+ if transaction_date:
+ print("Transaction Date: {} has confidence: {}".format(transaction_date.value, transaction_date.confidence))
+ if receipt.fields.get("Items"):
+ print("Receipt items:")
+ for idx, item in enumerate(receipt.fields.get("Items").value):
+ print("...Item #{}".format(idx+1))
+ item_name = item.value.get("Name")
+ if item_name:
+ print("......Item Name: {} has confidence: {}".format(item_name.value, item_name.confidence))
+ item_quantity = item.value.get("Quantity")
+ if item_quantity:
+ print("......Item Quantity: {} has confidence: {}".format(item_quantity.value, item_quantity.confidence))
+ item_price = item.value.get("Price")
+ if item_price:
+ print("......Individual Item Price: {} has confidence: {}".format(item_price.value, item_price.confidence))
+ item_total_price = item.value.get("TotalPrice")
+ if item_total_price:
+ print("......Total Item Price: {} has confidence: {}".format(item_total_price.value, item_total_price.confidence))
+ subtotal = receipt.fields.get("Subtotal")
+ if subtotal:
+ print("Subtotal: {} has confidence: {}".format(subtotal.value, subtotal.confidence))
+ tax = receipt.fields.get("Tax")
+ if tax:
+ print("Tax: {} has confidence: {}".format(tax.value, tax.confidence))
+ tip = receipt.fields.get("Tip")
+ if tip:
+ print("Tip: {} has confidence: {}".format(tip.value, tip.confidence))
+ total = receipt.fields.get("Total")
+ if total:
+ print("Total: {} has confidence: {}".format(total.value, total.confidence))
+ print("--------------------------------------")
+```
+
+Analyzing prebuilt models like business cards, identity documents, invoices, and receipts with `3.2.x`:
+```python
+with open(path_to_sample_documents, "rb") as f:
+ poller = document_analysis_client.begin_analyze_document(
+ "prebuilt-receipt", document=f, locale="en-US"
+ )
+receipts = poller.result()
+
+for idx, receipt in enumerate(receipts.documents):
+ print("--------Recognizing receipt #{}--------".format(idx + 1))
+ receipt_type = receipt.fields.get("ReceiptType")
+ if receipt_type:
+ print(
+ "Receipt Type: {} has confidence: {}".format(
+ receipt_type.value, receipt_type.confidence
+ )
+ )
+ merchant_name = receipt.fields.get("MerchantName")
+ if merchant_name:
+ print(
+ "Merchant Name: {} has confidence: {}".format(
+ merchant_name.value, merchant_name.confidence
+ )
+ )
+ transaction_date = receipt.fields.get("TransactionDate")
+ if transaction_date:
+ print(
+ "Transaction Date: {} has confidence: {}".format(
+ transaction_date.value, transaction_date.confidence
+ )
+ )
+ if receipt.fields.get("Items"):
+ print("Receipt items:")
+ for idx, item in enumerate(receipt.fields.get("Items").value):
+ print("...Item #{}".format(idx + 1))
+ item_name = item.value.get("Name")
+ if item_name:
+ print(
+ "......Item Name: {} has confidence: {}".format(
+ item_name.value, item_name.confidence
+ )
+ )
+ item_quantity = item.value.get("Quantity")
+ if item_quantity:
+ print(
+ "......Item Quantity: {} has confidence: {}".format(
+ item_quantity.value, item_quantity.confidence
+ )
+ )
+ item_price = item.value.get("Price")
+ if item_price:
+ print(
+ "......Individual Item Price: {} has confidence: {}".format(
+ item_price.value, item_price.confidence
+ )
+ )
+ item_total_price = item.value.get("TotalPrice")
+ if item_total_price:
+ print(
+ "......Total Item Price: {} has confidence: {}".format(
+ item_total_price.value, item_total_price.confidence
+ )
+ )
+ subtotal = receipt.fields.get("Subtotal")
+ if subtotal:
+ print(
+ "Subtotal: {} has confidence: {}".format(
+ subtotal.value, subtotal.confidence
+ )
+ )
+ tax = receipt.fields.get("Tax")
+ if tax:
+ print("Tax: {} has confidence: {}".format(tax.value, tax.confidence))
+ tip = receipt.fields.get("Tip")
+ if tip:
+ print("Tip: {} has confidence: {}".format(tip.value, tip.confidence))
+ total = receipt.fields.get("Total")
+ if total:
+ print("Total: {} has confidence: {}".format(total.value, total.confidence))
+ print("--------------------------------------")
+```
+
+Analyzing document content with `3.1.x`:
+
+> NOTE: With version `3.1.x` of the library this method was called with a `language` keyword argument to hint at the language for the document, whereas in version `3.2.x` of the library `locale` is used for this purpose.
+
+```python
+with open(path_to_sample_forms, "rb") as f:
+ poller = form_recognizer_client.begin_recognize_content(form=f)
+form_pages = poller.result()
+
+for idx, content in enumerate(form_pages):
+ print("----Recognizing content from page #{}----".format(idx+1))
+ print("Page has width: {} and height: {}, measured with unit: {}".format(
+ content.width,
+ content.height,
+ content.unit
+ ))
+ for table_idx, table in enumerate(content.tables):
+ print("Table # {} has {} rows and {} columns".format(table_idx, table.row_count, table.column_count))
+ print("Table # {} location on page: {}".format(table_idx, format_bounding_box(table.bounding_box)))
+ for cell in table.cells:
+ print("...Cell[{}][{}] has text '{}' within bounding box '{}'".format(
+ cell.row_index,
+ cell.column_index,
+ cell.text,
+ format_bounding_box(cell.bounding_box)
+ ))
+
+ for line_idx, line in enumerate(content.lines):
+ print("Line # {} has word count '{}' and text '{}' within bounding box '{}'".format(
+ line_idx,
+ len(line.words),
+ line.text,
+ format_bounding_box(line.bounding_box)
+ ))
+ if line.appearance:
+ if line.appearance.style_name == "handwriting" and line.appearance.style_confidence > 0.8:
+ print("Text line '{}' is handwritten and might be a signature.".format(line.text))
+ for word in line.words:
+ print("...Word '{}' has a confidence of {}".format(word.text, word.confidence))
+
+ for selection_mark in content.selection_marks:
+ print("Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format(
+ selection_mark.state,
+ format_bounding_box(selection_mark.bounding_box),
+ selection_mark.confidence
+ ))
+ print("----------------------------------------")
+```
+
+
+Analyzing document layout with `3.2.x`:
+```python
+with open(path_to_sample_documents, "rb") as f:
+ poller = document_analysis_client.begin_analyze_document(
+ "prebuilt-layout", document=f
+ )
+result = poller.result()
+
+for idx, style in enumerate(result.styles):
+ print(
+ "Document contains {} content".format(
+ "handwritten" if style.is_handwritten else "no handwritten"
+ )
+ )
+
+for idx, page in enumerate(result.pages):
+ print("----Analyzing layout from page #{}----".format(idx + 1))
+ print(
+ "Page has width: {} and height: {}, measured with unit: {}".format(
+ page.width, page.height, page.unit
+ )
+ )
+
+ for line_idx, line in enumerate(page.lines):
+ print(
+ "Line # {} has text content '{}' within bounding box '{}'".format(
+ line_idx,
+ line.content,
+ format_bounding_box(line.bounding_box),
+ )
+ )
+
+ for word in page.words:
+ print(
+ "...Word '{}' has a confidence of {}".format(
+ word.content, word.confidence
+ )
+ )
+
+ for selection_mark in page.selection_marks:
+ print(
+ "Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format(
+ selection_mark.state,
+ format_bounding_box(selection_mark.bounding_box),
+ selection_mark.confidence,
+ )
+ )
+
+for table_idx, table in enumerate(result.tables):
+ print(
+ "Table # {} has {} rows and {} columns".format(
+ table_idx, table.row_count, table.column_count
+ )
+ )
+ for region in table.bounding_regions:
+ print(
+ "Table # {} location on page: {} is {}".format(
+ table_idx,
+ region.page_number,
+ format_bounding_box(region.bounding_box),
+ )
+ )
+ for cell in table.cells:
+ print(
+ "...Cell[{}][{}] has text '{}'".format(
+ cell.row_index,
+ cell.column_index,
+ cell.content,
+ )
+ )
+ for region in cell.bounding_regions:
+ print(
+ "...content on page {} is within bounding box '{}'".format(
+ region.page_number,
+ format_bounding_box(region.bounding_box),
+ )
+ )
+
+print("----------------------------------------")
+```
+
+Analyzing general prebuilt document types with `3.2.x`:
+
+> NOTE: Analyzing a document with the `prebuilt-document` model replaces training without labels in version `3.1.x` of the library.
+
+```python
+with open(path_to_sample_documents, "rb") as f:
+ poller = document_analysis_client.begin_analyze_document(
+ "prebuilt-document", document=f
+ )
+result = poller.result()
+
+for style in result.styles:
+ print(
+ "Document contains {} content".format(
+ "handwritten" if style.is_handwritten else "no handwritten"
+ )
+ )
+
+for page in result.pages:
+ print("----Analyzing document from page #{}----".format(page.page_number))
+ print(
+ "Page has width: {} and height: {}, measured with unit: {}".format(
+ page.width, page.height, page.unit
+ )
+ )
+
+ for line_idx, line in enumerate(page.lines):
+ print(
+ "...Line # {} has text content '{}' within bounding box '{}'".format(
+ line_idx,
+ line.content,
+ format_bounding_box(line.bounding_box),
+ )
+ )
+
+ for word in page.words:
+ print(
+ "...Word '{}' has a confidence of {}".format(
+ word.content, word.confidence
+ )
+ )
+
+ for selection_mark in page.selection_marks:
+ print(
+ "...Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format(
+ selection_mark.state,
+ format_bounding_box(selection_mark.bounding_box),
+ selection_mark.confidence,
+ )
+ )
+
+for table_idx, table in enumerate(result.tables):
+ print(
+ "Table # {} has {} rows and {} columns".format(
+ table_idx, table.row_count, table.column_count
+ )
+ )
+ for region in table.bounding_regions:
+ print(
+ "Table # {} location on page: {} is {}".format(
+ table_idx,
+ region.page_number,
+ format_bounding_box(region.bounding_box),
+ )
+ )
+ for cell in table.cells:
+ print(
+ "...Cell[{}][{}] has content '{}'".format(
+ cell.row_index,
+ cell.column_index,
+ cell.content,
+ )
+ )
+ for region in cell.bounding_regions:
+ print(
+ "...content on page {} is within bounding box '{}'\n".format(
+ region.page_number,
+ format_bounding_box(region.bounding_box),
+ )
+ )
+
+print("----Entities found in document----")
+for entity in result.entities:
+ print("Entity of category '{}' with sub-category '{}'".format(entity.category, entity.sub_category))
+ print("...has content '{}'".format(entity.content))
+ print("...within '{}' bounding regions".format(format_bounding_region(entity.bounding_regions)))
+ print("...with confidence {}\n".format(entity.confidence))
+
+print("----Key-value pairs found in document----")
+for kv_pair in result.key_value_pairs:
+ if kv_pair.key:
+ print(
+ "Key '{}' found within '{}' bounding regions".format(
+ kv_pair.key.content,
+ format_bounding_region(kv_pair.key.bounding_regions),
+ )
+ )
+ if kv_pair.value:
+ print(
+ "Value '{}' found within '{}' bounding regions\n".format(
+ kv_pair.value.content,
+ format_bounding_region(kv_pair.value.bounding_regions),
+ )
+ )
+print("----------------------------------------")
+```
+
+> NOTE: All of these samples also work with `begin_analyze_document_from_url` when providing a valid URL to the document.
+
+### Analyzing a document with a custom model
+
+Differences between the versions:
+- Analyzing a custom model with `DocumentAnalysisClient` uses the general `begin_analyze_document` and `begin_analyze_document_from_url` methods.
+- In order to analyze a custom model with `FormRecognizerClient` the `begin_recognize_custom_models` and its corresponding URL methods are used.
+- The `include_field_elements` keyword argument is not supported with the `DocumentAnalysisClient`, text details are automatically included with API version `2021-09-30-preview` and later.
+
+Analyze custom document with `3.1.x`:
+```python
+with open(path_to_sample_forms, "rb") as f:
+ poller = form_recognizer_client.begin_recognize_custom_forms(
+ model_id=model_id, form=f, include_field_elements=True
+ )
+forms = poller.result()
+
+for idx, form in enumerate(forms):
+ print("--------Recognizing Form #{}--------".format(idx+1))
+ print("Form has type {}".format(form.form_type))
+ print("Form has form type confidence {}".format(form.form_type_confidence))
+ print("Form was analyzed with model with ID {}".format(form.model_id))
+ for name, field in form.fields.items():
+ # each field is of type FormField
+ # label_data is populated if you are using a model trained without labels,
+ # since the service needs to make predictions for labels if not explicitly given to it.
+ if field.label_data:
+ print("...Field '{}' has label '{}' with a confidence score of {}".format(
+ name,
+ field.label_data.text,
+ field.confidence
+ ))
+
+ print("...Label '{}' has value '{}' with a confidence score of {}".format(
+ field.label_data.text if field.label_data else name, field.value, field.confidence
+ ))
+
+ # iterate over tables, lines, and selection marks on each page
+ for page in form.pages:
+ for i, table in enumerate(page.tables):
+ print("\nTable {} on page {}".format(i+1, table.page_number))
+ for cell in table.cells:
+ print("...Cell[{}][{}] has text '{}' with confidence {}".format(
+ cell.row_index, cell.column_index, cell.text, cell.confidence
+ ))
+ print("\nLines found on page {}".format(page.page_number))
+ for line in page.lines:
+ print("...Line '{}' is made up of the following words: ".format(line.text))
+ for word in line.words:
+ print("......Word '{}' has a confidence of {}".format(
+ word.text,
+ word.confidence
+ ))
+ if page.selection_marks:
+ print("\nSelection marks found on page {}".format(page.page_number))
+ for selection_mark in page.selection_marks:
+ print("......Selection mark is '{}' and has a confidence of {}".format(
+ selection_mark.state,
+ selection_mark.confidence
+ ))
+
+ print("-----------------------------------")
+```
+
+Analyze custom document with `3.2.x`:
+```python
+with open(path_to_sample_documents, "rb") as f:
+ poller = document_analysis_client.begin_analyze_document(
+ model=model_id, document=f
+ )
+result = poller.result()
+
+for idx, document in enumerate(result.documents):
+ print("--------Analyzing document #{}--------".format(idx + 1))
+ print("Document has type {}".format(document.doc_type))
+ print("Document has document type confidence {}".format(document.confidence))
+ print("Document was analyzed with model with ID {}".format(result.model_id))
+ for name, field in document.fields.items():
+ field_value = field.value if field.value else field.content
+ print("......found field of type '{}' with value '{}' and with confidence {}".format(field.value_type, field_value, field.confidence))
+
+
+# iterate over tables, lines, and selection marks on each page
+for page in result.pages:
+ print("\nLines found on page {}".format(page.page_number))
+ for line in page.lines:
+ print("...Line '{}'".format(line.content))
+ for word in page.words:
+ print(
+ "...Word '{}' has a confidence of {}".format(
+ word.content, word.confidence
+ )
+ )
+ if page.selection_marks:
+ print("\nSelection marks found on page {}".format(page.page_number))
+ for selection_mark in page.selection_marks:
+ print(
+ "...Selection mark is '{}' and has a confidence of {}".format(
+ selection_mark.state, selection_mark.confidence
+ )
+ )
+
+for i, table in enumerate(result.tables):
+ print("\nTable {} can be found on page:".format(i + 1))
+ for region in table.bounding_regions:
+        print("...{}".format(region.page_number))
+ for cell in table.cells:
+ print(
+ "...Cell[{}][{}] has text '{}'".format(
+ cell.row_index, cell.column_index, cell.content
+ )
+ )
+print("-----------------------------------")
+```
+
+### Training a custom model
+
+Differences between the versions:
+- Files for building a new model for version `3.2.x` can be created using the labeling tool found [here][fr_labeling_tool].
+- In version `3.1.x` the `use_training_labels` keyword argument was used to indicate whether to use labeled data when creating the custom model.
+- In version `3.2.x` the `use_training_labels` keyword argument is not supported since training must be carried out with labeled training documents. Additionally train without labels is now replaced with the prebuilt model "prebuilt-document" which extracts entities, key-value pairs, and layout from a document.
+
+Train a custom model with `3.1.x`:
+```python
+form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key))
+poller = form_training_client.begin_training(
+ container_sas_url, use_training_labels=True, model_name="mymodel"
+)
+model = poller.result()
+
+# Custom model information
+print("Model ID: {}".format(model.model_id))
+print("Status: {}".format(model.status))
+print("Model name: {}".format(model.model_name))
+print("Is this a composed model?: {}".format(model.properties.is_composed_model))
+print("Training started on: {}".format(model.training_started_on))
+print("Training completed on: {}".format(model.training_completed_on))
+
+print("Recognized fields:")
+# looping through the submodels, which contains the fields they were trained on
+for submodel in model.submodels:
+ print("...The submodel has model ID: {}".format(submodel.model_id))
+ print("...The submodel with form type {} has an average accuracy '{}'".format(
+ submodel.form_type, submodel.accuracy
+ ))
+ for name, field in submodel.fields.items():
+ print("...The model found the field '{}' with an accuracy of {}".format(
+ name, field.accuracy
+ ))
+
+# Training result information
+for doc in model.training_documents:
+ print("Document name: {}".format(doc.name))
+ print("Document status: {}".format(doc.status))
+ print("Document page count: {}".format(doc.page_count))
+ print("Document errors: {}".format(doc.errors))
+```
+
+Train a custom model with `3.2.x`:
+```python
+document_model_admin_client = DocumentModelAdministrationClient(endpoint, AzureKeyCredential(key))
+poller = document_model_admin_client.begin_build_model(
+ container_sas_url, model_id="my-model-id", description="my model description"
+)
+model = poller.result()
+
+print("Model ID: {}".format(model.model_id))
+print("Description: {}".format(model.description))
+print("Model created on: {}\n".format(model.created_on))
+print("Doc types the model can recognize:")
+for name, doc_type in model.doc_types.items():
+ print("\nDoc Type: '{}' which has the following fields:".format(name))
+ for field_name, confidence in doc_type.field_confidence.items():
+ print("Field: '{}' has confidence score {}".format(field_name, confidence))
+```
+
+### Manage models
+
+Differences between the versions:
+- When using API version `2021-09-30-preview` and later, models no longer include submodels; instead a model can analyze different document types.
+- When building, composing, or copying models users can now assign their own model IDs and specify a description.
+- In version `3.2.x` of the library, only models that build successfully can be retrieved from the get and list model calls. Unsuccessful model operations can be viewed with the get and list operation methods (note that document model operation data persists for only 24 hours). In version `3.1.x` of the library, models that had not succeeded were still created, had to be deleted by the user, and were returned in the list models response.
+
+## Additional samples
+
+For additional samples please take a look at the [Form Recognizer Samples][samples_readme] for more guidance.
+
+[readme]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/README.md
+[samples_readme]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md
+[fr_labeling_tool]: https://aka.ms/azsdk/formrecognizer/labelingtool
\ No newline at end of file
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/README.md
index 6a3ceba3b737..7b53c521c1ee 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/README.md
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/README.md
@@ -39,7 +39,7 @@ This table shows the relationship between SDK versions and supported API version
|3.0.0| 2.0
> Note: Starting with version 2021-09-30-preview, a new set of clients were introduced to leverage the newest features
-> of the Form Recognizer service. Please see the Migration Guide for detailed instructions on how to update application
+> of the Form Recognizer service. Please see the [Migration Guide][migration-guide] for detailed instructions on how to update application
> code from client library version 3.1.X or lower to the latest version. Additionally, see the [Changelog][changelog] for more detailed information.
> The below table describes the relationship of each client and its supported API version(s):
@@ -160,6 +160,7 @@ Use the `model` parameter to select the type of model for analysis.
|"{custom-model-id}"| Text extraction, selection marks, tables, labeled fields and values from your custom documents
Sample code snippets are provided to illustrate using a DocumentAnalysisClient [here](#examples "Examples").
+More information about analyzing documents, including supported features and locales can be found in the [service documentation][fr-models].
### DocumentModelAdministrationClient
`DocumentModelAdministrationClient` provides operations for:
@@ -191,6 +192,7 @@ The following section provides several code snippets covering some of the most c
* [Extract layout](#extract-layout "Extract Layout")
* [Using Prebuilt Models](#using-prebuilt-models "Using Prebuilt Models")
+* [Using Prebuilt Document](#using-prebuilt-document "Using Prebuilt Document")
* [Build a Model](#build-a-model "Build a model")
* [Analyze Documents Using a Custom Model](#analyze-documents-using-a-custom-model "Analyze Documents Using a Custom Model")
* [Manage Your Models](#manage-your-models "Manage Your Models")
@@ -310,6 +312,83 @@ You are not limited to receipts! There are a few prebuilt models to choose from,
- Analyze invoices using the `prebuilt-invoice` model (fields recognized by the service can be found [here][service_recognize_invoice]).
- Analyze identity documents using the `prebuilt-idDocument` model (fields recognized by the service can be found [here][service_recognize_identity_documents]).
+### Using Prebuilt Document
+Analyze entities, key-value pairs, tables, styles, and selection marks from documents using the general prebuilt document model provided by the Form Recognizer service.
+Select the Prebuilt Document model by passing `model="prebuilt-document"` into the `begin_analyze_document` method:
+
+```python
+from azure.ai.formrecognizer import DocumentAnalysisClient
+from azure.core.credentials import AzureKeyCredential
+
+endpoint = "https://.cognitiveservices.azure.com/"
+credential = AzureKeyCredential("")
+
+document_analysis_client = DocumentAnalysisClient(endpoint, credential)
+
+with open("", "rb") as fd:
+ document = fd.read()
+
+poller = document_analysis_client.begin_analyze_document("prebuilt-document", document)
+result = poller.result()
+
+print("----Entities found in document----")
+for entity in result.entities:
+ print("Entity '{}' has category '{}' with sub-category '{}'".format(
+ entity.content, entity.category, entity.sub_category
+ ))
+ print("...with confidence {}\n".format(entity.confidence))
+
+print("----Key-value pairs found in document----")
+for kv_pair in result.key_value_pairs:
+ if kv_pair.key:
+ print(
+ "Key '{}' found within '{}' bounding regions".format(
+ kv_pair.key.content,
+ kv_pair.key.bounding_regions,
+ )
+ )
+ if kv_pair.value:
+ print(
+ "Value '{}' found within '{}' bounding regions\n".format(
+ kv_pair.value.content,
+ kv_pair.value.bounding_regions,
+ )
+ )
+
+print("----Tables found in document----")
+for table_idx, table in enumerate(result.tables):
+ print(
+ "Table # {} has {} rows and {} columns".format(
+ table_idx, table.row_count, table.column_count
+ )
+ )
+ for region in table.bounding_regions:
+ print(
+ "Table # {} location on page: {} is {}".format(
+ table_idx,
+ region.page_number,
+ region.bounding_box,
+ )
+ )
+
+print("----Styles found in document----")
+for style in result.styles:
+ if style.is_handwritten:
+ print("Document contains handwritten content: ")
+ print(",".join([result.content[span.offset:span.offset + span.length] for span in style.spans]))
+
+print("----Selection marks found in document----")
+for page in result.pages:
+ for selection_mark in page.selection_marks:
+ print(
+ "...Selection mark is '{}' within bounding box '{}' and has a confidence of {}".format(
+ selection_mark.state,
+ selection_mark.bounding_box,
+ selection_mark.confidence,
+ )
+ )
+```
+
### Build a model
Build a custom model on your own document type. The resulting model can be used to analyze values from the types of documents it was trained on.
Provide a container SAS URL to your Azure Storage Blob container where you're storing the training documents.
@@ -454,6 +533,7 @@ except ResourceNotFoundError:
### General
Form Recognizer client library will raise exceptions defined in [Azure Core][azure_core_exceptions].
+Error codes and messages raised by the Form Recognizer service can be found in the [service documentation][fr-errors].
### Logging
This library uses the standard
@@ -506,6 +586,8 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con
[fr-labeling-tool]: https://aka.ms/azsdk/formrecognizer/labelingtool
[fr-build-model]: https://aka.ms/azsdk/formrecognizer/buildmodel
[fr-build-training-set]: https://aka.ms/azsdk/formrecognizer/buildtrainingset
+[fr-models]: https://aka.ms/azsdk/formrecognizer/models
+[fr-errors]: https://aka.ms/azsdk/formrecognizer/errors
[azure_core_ref_docs]: https://aka.ms/azsdk/python/core/docs
[azure_core_exceptions]: https://aka.ms/azsdk/python/core/docs#module-azure.core.exceptions
@@ -518,13 +600,14 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con
[custom_subdomain]: https://docs.microsoft.com/azure/cognitive-services/authentication#create-a-resource-with-a-custom-subdomain
[azure_identity]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity
[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential
-[service_recognize_receipt]: https://aka.ms/formrecognizer/receiptfields
-[service_recognize_business_cards]: https://aka.ms/formrecognizer/businesscardfields
-[service_recognize_invoice]: https://aka.ms/formrecognizer/invoicefields
-[service_recognize_identity_documents]: https://aka.ms/formrecognizer/iddocumentfields
+[service_recognize_receipt]: https://aka.ms/azsdk/formrecognizer/receiptfieldschema
+[service_recognize_business_cards]: https://aka.ms/azsdk/formrecognizer/businesscardfieldschema
+[service_recognize_invoice]: https://aka.ms/azsdk/formrecognizer/invoicefieldschema
+[service_recognize_identity_documents]: https://aka.ms/azsdk/formrecognizer/iddocumentfieldschema
[sdk_logging_docs]: https://docs.microsoft.com/azure/developer/python/azure-sdk-logging
[sample_readme]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples
[changelog]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md
+[migration-guide]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md
[cla]: https://cla.microsoft.com
[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py
index b71b519195e1..9e625e4693db 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_document_analysis_client.py
@@ -78,9 +78,8 @@ def begin_analyze_document(self, model, document, **kwargs):
"""Analyze field text and semantic values from a given document.
:param str model: A unique model identifier can be passed in as a string.
- Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are:
- "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard",
- "prebuilt-document", "prebuilt-layout".
+ Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs supported
+ can be found here: https://aka.ms/azsdk/formrecognizer/models
:param document: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type document: bytes or IO[bytes]
:keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
@@ -134,9 +133,8 @@ def begin_analyze_document_from_url(self, model, document_url, **kwargs):
The input must be the location (URL) of the document to be analyzed.
:param str model: A unique model identifier can be passed in as a string.
- Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are:
- "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard",
- "prebuilt-document", "prebuilt-layout".
+ Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs supported
+ can be found here: https://aka.ms/azsdk/formrecognizer/models
:param str document_url: The URL of the document to analyze. The input must be a valid, encoded, and
publicly accessible URL of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_base_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_base_client.py
index 037e8e8c6c98..ce37f74b2a34 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_base_client.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_base_client.py
@@ -56,6 +56,8 @@ def __init__(self, endpoint, credential, **kwargs):
"op",
"pages",
"readingOrder",
+ "stringIndexType",
+ "api-version"
}
)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py
index f5ebaa29eb3f..25b71f5cfc16 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_document_analysis_client_async.py
@@ -84,9 +84,8 @@ async def begin_analyze_document(
"""Analyze field text and semantic values from a given document.
:param str model: A unique model identifier can be passed in as a string.
- Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are:
- "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard",
- "prebuilt-document", "prebuilt-layout".
+ Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs supported
+ can be found here: https://aka.ms/azsdk/formrecognizer/models
:param document: JPEG, PNG, PDF, TIFF, or BMP type file stream or bytes.
:type document: bytes or IO[bytes]
:keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
@@ -141,9 +140,8 @@ async def begin_analyze_document_from_url(
The input must be the location (URL) of the document to be analyzed.
:param str model: A unique model identifier can be passed in as a string.
- Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs to use are:
- "prebuilt-receipt", "prebuilt-invoice", "prebuilt-idDocument", "prebuilt-businessCard",
- "prebuilt-document", "prebuilt-layout".
+ Use this to specify the custom model ID or prebuilt model ID. Prebuilt model IDs supported
+ can be found here: https://aka.ms/azsdk/formrecognizer/models
:param str document_url: The URL of the document to analyze. The input must be a valid, encoded, and
publicly accessible URL of one of the supported formats: JPEG, PNG, PDF, TIFF, or BMP.
:keyword str pages: Custom page numbers for multi-page documents(PDF/TIFF). Input the page numbers
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_base_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_base_client_async.py
index 1e3b22effa3e..6468e6bb5665 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_base_client_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_base_client_async.py
@@ -67,6 +67,8 @@ def __init__(
"op",
"pages",
"readingOrder",
+ "stringIndexType",
+ "api-version"
}
)
self._client = FormRecognizer(
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md
index 3dbf77803ad1..5ae1abf2c376 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md
@@ -11,6 +11,10 @@ urlFragment: formrecognizer-samples
# Samples for Azure Form Recognizer client library for Python
+> Note: Starting with version 2021-09-30-preview, a new set of clients were introduced to leverage the newest features
+> of the Form Recognizer service. Please see the [Migration Guide][migration-guide] for detailed instructions on how to update application
+> code from client library version 3.1.X or lower to the latest version. Additionally, see the [Changelog][changelog] for more detailed information.
+
These code samples show common scenario operations with the Azure Form Recognizer client library.
The async versions of the samples require Python 3.6 or later.
@@ -18,6 +22,26 @@ These sample programs show common scenarios for the Form Recognizer client's off
All of these samples need the endpoint to your Form Recognizer resource ([instructions on how to get endpoint][get-endpoint-instructions]), and your Form Recognizer API key ([instructions on how to get key][get-key-instructions]).
+## Samples for client library versions 3.2.0b1 and later
+
+|**File Name**|**Description**|
+|----------------|-------------|
+|[sample_authentication.py][sample_auth] and [sample_authentication_async.py][sample_auth_async]|Authenticate the client|
+|[sample_analyze_layout.py][sample_analyze_layout] and [sample_analyze_layout_async.py][sample_analyze_layout_async]|Extract text, selection marks, and table structures in a document|
+|[sample_analyze_prebuilt_document.py][sample_analyze_prebuilt_document] and [sample_analyze_prebuilt_document_async.py][sample_analyze_prebuilt_document_async]|Analyze document key-value pairs, entities, tables, and selection marks using a prebuilt model|
+|[sample_analyze_invoices.py][sample_analyze_invoices] and [sample_analyze_invoices_async.py][sample_analyze_invoices_async]|Analyze document text, selection marks, tables, and pre-trained fields and values pertaining to English invoices using a prebuilt model|
+|[sample_analyze_business_cards.py][sample_analyze_business_cards] and [sample_analyze_business_cards_async.py][sample_analyze_business_cards_async]|Analyze document text and pre-trained fields and values pertaining to English business cards using a prebuilt model|
+|[sample_analyze_identity_documents.py][sample_analyze_identity_documents] and [sample_analyze_identity_documents_async.py][sample_analyze_identity_documents_async]|Analyze document text and pre-trained fields and values pertaining to US driver licenses and international passports using a prebuilt model|
+|[sample_analyze_receipts.py][sample_analyze_receipts] and [sample_analyze_receipts_async.py][sample_analyze_receipts_async]|Analyze document text and pre-trained fields and values pertaining to English sales receipts using a prebuilt model|
+|[sample_analyze_custom_documents.py][sample_analyze_custom_documents] and [sample_analyze_custom_documents_async.py][sample_analyze_custom_documents_async]|Analyze custom documents with your custom model to extract text, field values, selection marks, and table data from documents|
+|[sample_build_model.py][sample_build_model] and [sample_build_model_async.py][sample_build_model_async]|Build a custom model|
+|[sample_create_composed_model.py][sample_composed_model] and [sample_create_composed_model_async.py][sample_composed_model_async]|Create a composed model from a collection of existing models to be called with a single model ID|
+|[sample_manage_models.py][sample_manage_models] and [sample_manage_models_async.py][sample_manage_models_async]|Manage the models in your account|
+|[sample_get_operations.py][sample_get_operations] and [sample_get_operations_async.py][sample_get_operations_async]|Get and list the document model operations created within the past 24 hours|
+|[sample_copy_model.py][sample_copy] and [sample_copy_model_async.py][sample_copy_async]|Copy a custom model from one Form Recognizer resource to another|
+
+## Samples for client library versions 3.1.X
+
|**File Name**|**Description**|
|----------------|-------------|
|[sample_authentication.py][sample_authentication] and [sample_authentication_async.py][sample_authentication_async]|Authenticate the client|
@@ -33,6 +57,14 @@ All of these samples need the endpoint to your Form Recognizer resource ([instru
|[sample_manage_custom_models.py][sample_manage_custom_models] and [sample_manage_custom_models_async.py][sample_manage_custom_models_async]|Manage the custom models in your account|
|[sample_copy_model.py][sample_copy_model] and [sample_copy_model_async.py][sample_copy_model_async]|Copy a custom model from one Form Recognizer resource to another|
|[sample_create_composed_model.py][sample_create_composed_model] and [sample_create_composed_model_async.py][sample_create_composed_model_async]|Create a composed model from a collection of existing models trained with labels|
+|[sample_strongly_typing_recognized_form.py][sample_strongly_typing_recognized_form] and [sample_strongly_typing_recognized_form_async.py][sample_strongly_typing_recognized_form_async]|Use the fields in your recognized forms to create an object with strongly-typed fields|
+|[sample_get_bounding_boxes.py][sample_get_bounding_boxes] and [sample_get_bounding_boxes_async.py][sample_get_bounding_boxes_async]|Get info to visualize the outlines of form content and fields, which can be used for manual validation|
+|[sample_differentiate_output_models_trained_with_and_without_labels.py][sample_differentiate_output_models_trained_with_and_without_labels] and [sample_differentiate_output_models_trained_with_and_without_labels_async.py][sample_differentiate_output_models_trained_with_and_without_labels_async]|See the differences in output when using a custom model trained with labeled data and one trained with unlabeled data|
+|[sample_differentiate_output_labeled_tables.py][sample_differentiate_output_labeled_tables] and [sample_differentiate_output_labeled_tables_async.py][sample_differentiate_output_labeled_tables_async]|See the differences in output when using a custom model trained with fixed vs. dynamic table tags|
+
+## Samples for client library versions 3.0.0 and below
+
+Please see the samples [here][v3.0.0-samples-tag].
## Prerequisites
* Python 2.7, or 3.6 or later is required to use this package (3.6 or later if using asyncio)
@@ -44,7 +76,7 @@ All of these samples need the endpoint to your Form Recognizer resource ([instru
1. Install the Azure Form Recognizer client library for Python with [pip][pip]:
```bash
-pip install azure-ai-formrecognizer
+pip install azure-ai-formrecognizer --pre
```
2. Clone or download this sample repository
@@ -54,19 +86,13 @@ pip install azure-ai-formrecognizer
1. Open a terminal window and `cd` to the directory that the samples are saved in.
2. Set the environment variables specified in the sample file you wish to run.
-3. Follow the usage described in the file, e.g. `python sample_recognize_receipts.py`
+3. Follow the usage described in the file, e.g. `python sample_analyze_receipts.py`
## Next steps
Check out the [API reference documentation][python-fr-ref-docs] to learn more about
what you can do with the Azure Form Recognizer client library.
-|**Advanced Sample File Name**|**Description**|
-|----------------|-------------|
-|[sample_strongly_typing_recognized_form.py][sample_strongly_typing_recognized_form] and [sample_strongly_typing_recognized_form_async.py][sample_strongly_typing_recognized_form_async]|Use the fields in your recognized forms to create an object with strongly-typed fields|
-|[sample_get_bounding_boxes.py][sample_get_bounding_boxes] and [sample_get_bounding_boxes_async.py][sample_get_bounding_boxes_async]|Get info to visualize the outlines of form content and fields, which can be used for manual validation|
-|[sample_differentiate_output_models_trained_with_and_without_labels.py][sample_differentiate_output_models_trained_with_and_without_labels] and [sample_differentiate_output_models_trained_with_and_without_labels_async.py][sample_differentiate_output_models_trained_with_and_without_labels_async]|See the differences in output when using a custom model trained with labeled data and one trained with unlabeled data|
-|[sample_differentiate_output_labeled_tables.py][sample_differentiate_output_labeled_tables] and [sample_differentiate_output_labeled_tables_async.py][sample_differentiate_output_labeled_tables_async]|See the differences in output when using a custom model trained with fixed vs. dynamic table tags|
[azure_identity]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity
@@ -77,42 +103,71 @@ what you can do with the Azure Form Recognizer client library.
[python-fr-ref-docs]: https://aka.ms/azsdk/python/formrecognizer/docs
[get-endpoint-instructions]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/README.md#looking-up-the-endpoint
[get-key-instructions]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/README.md#get-the-api-key
-
-
\ No newline at end of file
+[changelog]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md
+[v3.0.0-samples-tag]: https://github.com/Azure/azure-sdk-for-python/tree/azure-ai-formrecognizer_3.0.0/sdk/formrecognizer/azure-ai-formrecognizer/samples
+[migration-guide]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/MIGRATION_GUIDE.md
+
+
+
+[sample_auth]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_authentication.py
+[sample_auth_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_authentication_async.py
+[sample_analyze_layout]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_layout.py
+[sample_analyze_layout_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_layout_async.py
+[sample_analyze_prebuilt_document]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py
+[sample_analyze_prebuilt_document_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py
+[sample_analyze_invoices]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_invoices.py
+[sample_analyze_invoices_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_invoices_async.py
+[sample_analyze_business_cards]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_business_cards.py
+[sample_analyze_business_cards_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_business_cards_async.py
+[sample_analyze_identity_documents]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_identity_documents.py
+[sample_analyze_identity_documents_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_identity_documents_async.py
+[sample_analyze_receipts]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts.py
+[sample_analyze_receipts_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_async.py
+[sample_analyze_custom_documents]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_custom_documents.py
+[sample_analyze_custom_documents_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_custom_documents_async.py
+[sample_build_model]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_build_model.py
+[sample_build_model_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_build_model_async.py
+[sample_composed_model]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_create_composed_model.py
+[sample_composed_model_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_create_composed_model_async.py
+[sample_manage_models]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_manage_models.py
+[sample_manage_models_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_manage_models_async.py
+[sample_get_operations]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_get_operations.py
+[sample_get_operations_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_get_operations_async.py
+[sample_copy]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_copy_model.py
+[sample_copy_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_copy_model_async.py
+
+
+[sample_authentication]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_authentication.py
+[sample_authentication_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_authentication_async.py
+[sample_differentiate_output_models_trained_with_and_without_labels]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_models_trained_with_and_without_labels.py
+[sample_differentiate_output_models_trained_with_and_without_labels_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py
+[sample_get_bounding_boxes]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_get_bounding_boxes.py
+[sample_get_bounding_boxes_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_get_bounding_boxes_async.py
+[sample_manage_custom_models]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_manage_custom_models.py
+[sample_manage_custom_models_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_manage_custom_models_async.py
+[sample_recognize_content]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_content.py
+[sample_recognize_content_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_content_async.py
+[sample_recognize_custom_forms]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_custom_forms.py
+[sample_recognize_custom_forms_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py
+[sample_recognize_receipts_from_url]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts_from_url.py
+[sample_recognize_receipts_from_url_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_from_url_async.py
+[sample_recognize_receipts]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts.py
+[sample_recognize_receipts_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_async.py
+[sample_recognize_business_cards]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_business_cards.py
+[sample_recognize_business_cards_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_business_cards_async.py
+[sample_recognize_identity_documents]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_identity_documents.py
+[sample_recognize_identity_documents_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_identity_documents_async.py
+[sample_recognize_invoices]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_invoices.py
+[sample_recognize_invoices_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_invoices_async.py
+[sample_train_model_with_labels]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_with_labels.py
+[sample_train_model_with_labels_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_with_labels_async.py
+[sample_train_model_without_labels]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_without_labels.py
+[sample_train_model_without_labels_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_without_labels_async.py
+[sample_copy_model]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_copy_model.py
+[sample_copy_model_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_copy_model_async.py
+[sample_strongly_typing_recognized_form]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_strongly_typing_recognized_form.py
+[sample_strongly_typing_recognized_form_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_strongly_typing_recognized_form_async.py
+[sample_create_composed_model]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_create_composed_model.py
+[sample_create_composed_model_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_create_composed_model_async.py
+[sample_differentiate_output_labeled_tables]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_labeled_tables.py
+[sample_differentiate_output_labeled_tables_async]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_labeled_tables_async.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_authentication_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_authentication_async.py
index e6df75d36622..edb62df41a7d 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_authentication_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_authentication_async.py
@@ -45,7 +45,7 @@ async def authentication_with_api_key_credential_form_recognizer_client_async(se
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
- form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key), api_version="2.1")
+ form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key))
# [END create_fr_client_with_key_async]
async with form_recognizer_client:
poller = await form_recognizer_client.begin_recognize_content_from_url(self.url)
@@ -62,7 +62,7 @@ async def authentication_with_azure_active_directory_form_recognizer_client_asyn
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
- form_recognizer_client = FormRecognizerClient(endpoint, credential, api_version="2.1")
+ form_recognizer_client = FormRecognizerClient(endpoint, credential)
# [END create_fr_client_with_aad_async]
async with form_recognizer_client:
poller = await form_recognizer_client.begin_recognize_content_from_url(self.url)
@@ -75,7 +75,7 @@ async def authentication_with_api_key_credential_form_training_client_async(self
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
- form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key), api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key))
# [END create_ft_client_with_key_async]
async with form_training_client:
properties = await form_training_client.get_account_properties()
@@ -91,7 +91,7 @@ async def authentication_with_azure_active_directory_form_training_client_async(
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
- form_training_client = FormTrainingClient(endpoint, credential, api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint, credential)
# [END create_ft_client_with_aad_async]
async with form_training_client:
properties = await form_training_client.get_account_properties()
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_copy_model_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_copy_model_async.py
index 00d91a4d1c29..ec6eb26aee43 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_copy_model_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_copy_model_async.py
@@ -52,7 +52,7 @@ async def copy_model_async(self, custom_model_id):
target_resource_id = os.environ["AZURE_FORM_RECOGNIZER_TARGET_RESOURCE_ID"]
# [START get_copy_authorization_async]
- target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key), api_version="2.1")
+ target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key))
async with target_client:
target = await target_client.get_copy_authorization(
@@ -64,7 +64,7 @@ async def copy_model_async(self, custom_model_id):
# [END get_copy_authorization_async]
# [START copy_model_async]
- source_client = FormTrainingClient(endpoint=source_endpoint, credential=AzureKeyCredential(source_key), api_version="2.1")
+ source_client = FormTrainingClient(endpoint=source_endpoint, credential=AzureKeyCredential(source_key))
async with source_client:
poller = await source_client.begin_copy_model(
@@ -93,7 +93,7 @@ async def main():
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (await form_training_client.begin_training(
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_create_composed_model_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_create_composed_model_async.py
index 37eb03e0d687..c6282e36d74f 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_create_composed_model_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_create_composed_model_async.py
@@ -53,7 +53,7 @@ async def create_composed_model_async(self):
po_furniture = os.environ['PURCHASE_ORDER_OFFICE_FURNITURE_SAS_URL_V2']
po_cleaning_supplies = os.environ['PURCHASE_ORDER_OFFICE_CLEANING_SUPPLIES_SAS_URL_V2']
- form_training_client = FormTrainingClient(endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint=endpoint, credential=AzureKeyCredential(key))
async with form_training_client:
supplies_poller = await form_training_client.begin_training(
po_supplies, use_training_labels=True, model_name="Purchase order - Office supplies"
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_labeled_tables_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_labeled_tables_async.py
index 051e4c7730f8..18b6d214bb15 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_labeled_tables_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_labeled_tables_async.py
@@ -53,7 +53,7 @@ async def test_recognize_tables_fixed_rows_async(self, custom_model_id):
model_id_fixed_rows_table = os.getenv("MODEL_ID_FIXED_ROW_TABLES", custom_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
@@ -96,7 +96,7 @@ async def test_recognize_tables_dynamic_rows_async(self, custom_model_id):
model_id_dynamic_rows_table = os.getenv("MODEL_ID_DYNAMIC_ROW_TABLES", custom_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
@@ -149,7 +149,7 @@ async def main():
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py
index 228a1d25bf84..4d951bbc6359 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py
@@ -59,7 +59,7 @@ async def recognize_custom_forms(self, labeled_model_id, unlabeled_model_id):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "..", "./sample_forms/forms/Form_1.jpg"))
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
with open(path_to_sample_forms, "rb") as f:
@@ -143,7 +143,7 @@ async def main():
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_get_bounding_boxes_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_get_bounding_boxes_async.py
index c71f4cc9c7af..7ab408f9bb70 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_get_bounding_boxes_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_get_bounding_boxes_async.py
@@ -51,7 +51,7 @@ async def get_bounding_boxes(self, custom_model_id):
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
@@ -135,7 +135,7 @@ async def main():
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (await form_training_client.begin_training(
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_manage_custom_models_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_manage_custom_models_async.py
index 3b3690980185..cb93556c966c 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_manage_custom_models_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_manage_custom_models_async.py
@@ -39,7 +39,7 @@ async def manage_custom_models(self):
# [START get_account_properties_async]
async with FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_training_client:
# First, we see how many custom models we have, and what our limit is
account_properties = await form_training_client.get_account_properties()
@@ -52,9 +52,9 @@ async def manage_custom_models(self):
# [START list_custom_models_async]
custom_models = form_training_client.list_custom_models()
- # print("We have models with the following IDs:") TODO list models is returning null for some models
- # async for model in custom_models:
- # print(model.model_id)
+ print("We have models with the following IDs:")
+ async for model in custom_models:
+ print(model.model_id)
# [END list_custom_models_async]
# let's train a model to use for this sample
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_business_cards_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_business_cards_async.py
index 2342eb8ad5b5..dd55efaf271a 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_business_cards_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_business_cards_async.py
@@ -41,7 +41,7 @@ async def recognize_business_card_async(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_recognizer_client:
with open(path_to_sample_forms, "rb") as f:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_content_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_content_async.py
index 7d994ce256b6..c9d849cb592d 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_content_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_content_async.py
@@ -48,7 +48,7 @@ async def recognize_content(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
with open(path_to_sample_forms, "rb") as f:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py
index 4bdec624340c..56054a5f9428 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py
@@ -48,7 +48,7 @@ async def recognize_custom_forms(self, custom_model_id):
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
# Make sure your form's type is included in the list of form types the custom model can recognize
@@ -121,7 +121,7 @@ async def main():
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (await form_training_client.begin_training(
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_identity_documents_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_identity_documents_async.py
index 0ddcbdde3081..45c645dcf70b 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_identity_documents_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_identity_documents_async.py
@@ -41,7 +41,7 @@ async def recognize_identity_documents(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
with open(path_to_sample_forms, "rb") as f:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_invoices_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_invoices_async.py
index 36eebfb2286d..948a3ccbb6ca 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_invoices_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_invoices_async.py
@@ -41,7 +41,7 @@ async def recognize_invoice(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
with open(path_to_sample_forms, "rb") as f:
poller = await form_recognizer_client.begin_recognize_invoices(invoice=f, locale="en-US")
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_async.py
index 00695047350a..87122dffb7fd 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_async.py
@@ -42,7 +42,7 @@ async def recognize_receipts(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
with open(path_to_sample_forms, "rb") as f:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_from_url_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_from_url_async.py
index 985c7084fd58..50bb1afac659 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_from_url_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_receipts_from_url_async.py
@@ -40,7 +40,7 @@ async def recognize_receipts_from_url(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/receipt/contoso-receipt.png"
poller = await form_recognizer_client.begin_recognize_receipts_from_url(receipt_url=url)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_strongly_typing_recognized_form_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_strongly_typing_recognized_form_async.py
index df2cd8ec4ac8..a99faab58315 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_strongly_typing_recognized_form_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_strongly_typing_recognized_form_async.py
@@ -83,7 +83,7 @@ async def strongly_typed_receipt_async(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
async with FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
with open(path_to_sample_forms, "rb") as f:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_with_labels_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_with_labels_async.py
index d521fa68e8f8..535e8b3b6370 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_with_labels_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_with_labels_async.py
@@ -45,7 +45,7 @@ async def train_model_with_labels(self):
container_sas_url = os.environ["CONTAINER_SAS_URL_V2"]
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_without_labels_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_without_labels_async.py
index 98881b1a7823..3941012df1ca 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_without_labels_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_train_model_without_labels_async.py
@@ -43,7 +43,7 @@ async def train_model_without_labels(self):
container_sas_url = os.environ["CONTAINER_SAS_URL_V2"]
async with FormTrainingClient(
- endpoint, AzureKeyCredential(key), api_version="2.1"
+ endpoint, AzureKeyCredential(key)
) as form_training_client:
poller = await form_training_client.begin_training(container_sas_url, use_training_labels=False)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_authentication.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_authentication.py
index 51eb8ea52a97..9b1279b5ebd4 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_authentication.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_authentication.py
@@ -44,7 +44,7 @@ def authentication_with_api_key_credential_form_recognizer_client(self):
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
- form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key), api_version="2.1")
+ form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key))
# [END create_fr_client_with_key]
poller = form_recognizer_client.begin_recognize_content_from_url(self.url)
result = poller.result()
@@ -60,7 +60,7 @@ def authentication_with_azure_active_directory_form_recognizer_client(self):
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
- form_recognizer_client = FormRecognizerClient(endpoint, credential, api_version="2.1")
+ form_recognizer_client = FormRecognizerClient(endpoint, credential)
# [END create_fr_client_with_aad]
poller = form_recognizer_client.begin_recognize_content_from_url(self.url)
result = poller.result()
@@ -72,7 +72,7 @@ def authentication_with_api_key_credential_form_training_client(self):
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
- form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key), api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key))
# [END create_ft_client_with_key]
properties = form_training_client.get_account_properties()
@@ -87,7 +87,7 @@ def authentication_with_azure_active_directory_form_training_client(self):
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
- form_training_client = FormTrainingClient(endpoint, credential, api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint, credential)
# [END create_ft_client_with_aad]
properties = form_training_client.get_account_properties()
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_copy_model.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_copy_model.py
index 42510ef6af04..fe2cefd29af8 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_copy_model.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_copy_model.py
@@ -51,7 +51,7 @@ def copy_model(self, custom_model_id):
target_resource_id = os.environ["AZURE_FORM_RECOGNIZER_TARGET_RESOURCE_ID"]
# [START get_copy_authorization]
- target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key), api_version="2.1")
+ target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key))
target = target_client.get_copy_authorization(
resource_region=target_region,
@@ -62,7 +62,7 @@ def copy_model(self, custom_model_id):
# [END get_copy_authorization]
# [START begin_copy_model]
- source_client = FormTrainingClient(endpoint=source_endpoint, credential=AzureKeyCredential(source_key), api_version="2.1")
+ source_client = FormTrainingClient(endpoint=source_endpoint, credential=AzureKeyCredential(source_key))
poller = source_client.begin_copy_model(
model_id=source_model_id,
@@ -90,7 +90,7 @@ def copy_model(self, custom_model_id):
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
model = form_training_client.begin_training(os.getenv("CONTAINER_SAS_URL_V2"), use_training_labels=True).result()
model_id = model.model_id
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_create_composed_model.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_create_composed_model.py
index 552841d967f8..fc54f9fa3cb4 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_create_composed_model.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_create_composed_model.py
@@ -52,7 +52,7 @@ def create_composed_model(self):
po_furniture = os.environ['PURCHASE_ORDER_OFFICE_FURNITURE_SAS_URL_V2']
po_cleaning_supplies = os.environ['PURCHASE_ORDER_OFFICE_CLEANING_SUPPLIES_SAS_URL_V2']
- form_training_client = FormTrainingClient(endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint=endpoint, credential=AzureKeyCredential(key))
supplies_poller = form_training_client.begin_training(
po_supplies, use_training_labels=True, model_name="Purchase order - Office supplies"
)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_labeled_tables.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_labeled_tables.py
index b0dd8f96f043..dff016c22a24 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_labeled_tables.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_labeled_tables.py
@@ -52,7 +52,7 @@ def test_recognize_tables_fixed_rows(self, custom_model_id):
model_id_fixed_rows_table = os.getenv("MODEL_ID_FIXED_ROW_TABLES", custom_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
@@ -94,7 +94,7 @@ def test_recognize_tables_dynamic_rows(self, custom_model_id):
model_id_dynamic_rows_table = os.getenv("MODEL_ID_DYNAMIC_ROW_TABLES", custom_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
@@ -146,7 +146,7 @@ def test_recognize_tables_dynamic_rows(self, custom_model_id):
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
if fixed:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_models_trained_with_and_without_labels.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_models_trained_with_and_without_labels.py
index 7ff61356bd5e..0fa15996107f 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_models_trained_with_and_without_labels.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_differentiate_output_models_trained_with_and_without_labels.py
@@ -56,7 +56,7 @@ def recognize_custom_forms(self, labeled_model_id, unlabeled_model_id):
model_trained_without_labels_id = os.getenv("ID_OF_MODEL_TRAINED_WITHOUT_LABELS", unlabeled_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "./sample_forms/forms/Form_1.jpg"))
@@ -143,7 +143,7 @@ def recognize_custom_forms(self, labeled_model_id, unlabeled_model_id):
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
if labeled:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_get_bounding_boxes.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_get_bounding_boxes.py
index 9aaa2ae10272..48651b9f2da8 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_get_bounding_boxes.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_get_bounding_boxes.py
@@ -50,7 +50,7 @@ def get_bounding_boxes(self, custom_model_id):
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
@@ -131,7 +131,7 @@ def get_bounding_boxes(self, custom_model_id):
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
model = form_training_client.begin_training(os.getenv("CONTAINER_SAS_URL_V2"), use_training_labels=False).result()
model_id = model.model_id
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_manage_custom_models.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_manage_custom_models.py
index cdff4576b85d..7fad685f6071 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_manage_custom_models.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_manage_custom_models.py
@@ -37,7 +37,7 @@ def manage_custom_models(self):
container_sas_url = os.environ["CONTAINER_SAS_URL_V2"]
# [START get_account_properties]
- form_training_client = FormTrainingClient(endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint=endpoint, credential=AzureKeyCredential(key))
# First, we see how many custom models we have, and what our limit is
account_properties = form_training_client.get_account_properties()
print("Our account has {} custom models, and we can have at most {} custom models\n".format(
@@ -49,9 +49,9 @@ def manage_custom_models(self):
# [START list_custom_models]
custom_models = form_training_client.list_custom_models()
- # print("We have models with the following IDs:") TODO list models is returning null for some models
- # for model in custom_models:
- # print(model.model_id)
+ print("We have models with the following IDs:")
+ for model in custom_models:
+ print(model.model_id)
# [END list_custom_models]
# let's train a model to use for this sample
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_business_cards.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_business_cards.py
index 416772a53435..53b800c5eea1 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_business_cards.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_business_cards.py
@@ -39,7 +39,7 @@ def recognize_business_card(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_business_cards(business_card=f, locale="en-US")
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_content.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_content.py
index 26ad101c2d5a..b76b17f48de0 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_content.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_content.py
@@ -46,7 +46,7 @@ def recognize_content(self):
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
- form_recognizer_client = FormRecognizerClient(endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1")
+ form_recognizer_client = FormRecognizerClient(endpoint=endpoint, credential=AzureKeyCredential(key))
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_content(form=f)
form_pages = poller.result()
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_custom_forms.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_custom_forms.py
index 68a7663b3952..35b2e20cd6af 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_custom_forms.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_custom_forms.py
@@ -47,7 +47,7 @@ def recognize_custom_forms(self, custom_model_id):
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
# Make sure your form's type is included in the list of form types the custom model can recognize
@@ -120,7 +120,7 @@ def recognize_custom_forms(self, custom_model_id):
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
model = form_training_client.begin_training(os.getenv("CONTAINER_SAS_URL_V2"), use_training_labels=True).result()
model_id = model.model_id
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_identity_documents.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_identity_documents.py
index 218bb16a5d5c..63e4572e6793 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_identity_documents.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_identity_documents.py
@@ -40,7 +40,7 @@ def recognize_identity_documents(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_identity_documents(identity_document=f)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_invoices.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_invoices.py
index fbe64b0a95a2..48a529dc21a6 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_invoices.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_invoices.py
@@ -40,7 +40,7 @@ def recognize_invoice(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_invoices(invoice=f, locale="en-US")
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts.py
index 968b47b79c25..e20945584d26 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts.py
@@ -41,7 +41,7 @@ def recognize_receipts(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_receipts(receipt=f, locale="en-US")
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts_from_url.py
index ece40f5ba83c..b3b4a3cc97f5 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts_from_url.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_receipts_from_url.py
@@ -39,7 +39,7 @@ def recognize_receipts_from_url(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/receipt/contoso-receipt.png"
poller = form_recognizer_client.begin_recognize_receipts_from_url(receipt_url=url)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_strongly_typing_recognized_form.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_strongly_typing_recognized_form.py
index e74a53105aca..3545d9a4dd02 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_strongly_typing_recognized_form.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_strongly_typing_recognized_form.py
@@ -82,7 +82,7 @@ def strongly_typed_receipt(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
- endpoint=endpoint, credential=AzureKeyCredential(key), api_version="2.1"
+ endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_receipts(receipt=f)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_with_labels.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_with_labels.py
index 7e553439ffb6..723487f777b9 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_with_labels.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_with_labels.py
@@ -43,7 +43,7 @@ def train_model_with_labels(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
container_sas_url = os.environ["CONTAINER_SAS_URL_V2"]
- form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key), api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key))
poller = form_training_client.begin_training(
container_sas_url, use_training_labels=True, model_name="mymodel"
)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_without_labels.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_without_labels.py
index d90a6107dd59..422e1d5a790b 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_without_labels.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_train_model_without_labels.py
@@ -41,7 +41,7 @@ def train_model_without_labels(self):
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
container_sas_url = os.environ["CONTAINER_SAS_URL_V2"]
- form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key), api_version="2.1")
+ form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key))
poller = form_training_client.begin_training(container_sas_url, use_training_labels=False)
model = poller.result()
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_business_cards_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_business_cards_async.py
index d0bd07ca84fe..ac2f2231f77f 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_business_cards_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_business_cards_async.py
@@ -13,7 +13,7 @@
This sample demonstrates how to analyze business cards.
See fields found on a business card here:
- https://aka.ms/formrecognizer/businesscardfields
+ https://aka.ms/azsdk/formrecognizer/businesscardfieldschema
USAGE:
python sample_analyze_business_cards_async.py
@@ -64,7 +64,7 @@ async def analyze_business_card_async():
contact_name.value["FirstName"].value,
contact_name.value[
"FirstName"
- ].confidence, # TODO confidence is None
+ ].confidence,
)
)
print(
@@ -72,7 +72,7 @@ async def analyze_business_card_async():
contact_name.value["LastName"].value,
contact_name.value[
"LastName"
- ].confidence, # TODO confidence is None
+ ].confidence,
)
)
company_names = business_card.fields.get("CompanyNames")
@@ -128,7 +128,7 @@ async def analyze_business_card_async():
"Mobile phone number: {} has confidence: {}".format(
phone.content, phone.confidence
)
- ) # TODO value not getting populated
+ )
faxes = business_card.fields.get("Faxes")
if faxes:
for fax in faxes.value:
@@ -136,7 +136,7 @@ async def analyze_business_card_async():
"Fax number: {} has confidence: {}".format(
fax.content, fax.confidence
)
- ) # TODO value not getting populated
+ )
work_phones = business_card.fields.get("WorkPhones")
if work_phones:
for work_phone in work_phones.value:
@@ -144,7 +144,7 @@ async def analyze_business_card_async():
"Work phone number: {} has confidence: {}".format(
work_phone.content, work_phone.confidence
)
- ) # TODO value not getting populated
+ )
other_phones = business_card.fields.get("OtherPhones")
if other_phones:
for other_phone in other_phones.value:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_identity_documents_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_identity_documents_async.py
index 1d4c66a2a5c2..2f650f2f3807 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_identity_documents_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_identity_documents_async.py
@@ -13,7 +13,7 @@
This sample demonstrates how to analyze an identity document.
See fields found on identity documents here:
- https://aka.ms/formrecognizer/iddocumentfields
+ https://aka.ms/azsdk/formrecognizer/iddocumentfieldschema
USAGE:
python sample_analyze_identity_documents_async.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_invoices_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_invoices_async.py
index 8448a628660b..16cb50d4dbad 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_invoices_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_invoices_async.py
@@ -13,7 +13,7 @@
This sample demonstrates how to analyze invoices.
See fields found on a invoice here:
- https://aka.ms/formrecognizer/invoicefields
+ https://aka.ms/azsdk/formrecognizer/invoicefieldschema
USAGE:
python sample_analyze_invoices_async.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_document_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py
similarity index 89%
rename from sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_document_async.py
rename to sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py
index 35392e9917f0..a6379520b2eb 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_document_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_prebuilt_document_async.py
@@ -7,18 +7,14 @@
# --------------------------------------------------------------------------
"""
-FILE: sample_analyze_document_async.py
+FILE: sample_analyze_prebuilt_document_async.py
DESCRIPTION:
This sample demonstrates how to extract general document information from a document
given through a file.
- Note that selection marks returned from begin_analyze_document() do not return the text associated with
- the checkbox. For the API to return this information, build a custom model to analyze the checkbox and its text.
- See sample_build_model_async.py for more information.
-
USAGE:
- python sample_analyze_document_async.py
+ python sample_analyze_prebuilt_document_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
@@ -67,12 +63,10 @@ async def analyze_document():
)
result = await poller.result()
- for idx, style in enumerate(result.styles):
- print(
- "Document contains {} content".format(
- "handwritten" if style.is_handwritten else "no handwritten"
- )
- )
+ for style in result.styles:
+ if style.is_handwritten:
+ print("Document contains handwritten content: ")
+ print(",".join([result.content[span.offset:span.offset + span.length] for span in style.spans]))
for idx, page in enumerate(result.pages):
print("----Analyzing document from page #{}----".format(idx + 1))
@@ -131,7 +125,7 @@ async def analyze_document():
)
for region in cell.bounding_regions:
print(
- "...content on page {} is within bounding box '{}'".format(
+ "...content on page {} is within bounding box '{}'\n".format(
region.page_number,
format_bounding_box(region.bounding_box),
)
@@ -142,7 +136,7 @@ async def analyze_document():
print("Entity of category '{}' with sub-category '{}'".format(entity.category, entity.sub_category))
print("...has content '{}'".format(entity.content))
print("...within '{}' bounding regions".format(format_bounding_region(entity.bounding_regions)))
- print("...with confidence {}".format(entity.confidence))
+ print("...with confidence {}\n".format(entity.confidence))
print("----Key-value pairs found in document----")
for idx, kv_pair in enumerate(result.key_value_pairs):
@@ -155,7 +149,7 @@ async def analyze_document():
)
if kv_pair.value:
print(
- "Value '{}' found within '{}' bounding regions".format(
+ "Value '{}' found within '{}' bounding regions\n".format(
kv_pair.value.content,
format_bounding_region(kv_pair.value.bounding_regions),
)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_async.py
index dec4fc0a277b..b5c8419631f1 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_async.py
@@ -14,7 +14,7 @@
using a pre-trained receipt model.
See fields found on a receipt here:
- https://aka.ms/formrecognizer/receiptfields
+ https://aka.ms/azsdk/formrecognizer/receiptfieldschema
USAGE:
python sample_analyze_receipts_async.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_from_url_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_from_url_async.py
index 839481373d3e..ec54660885e0 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_from_url_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_receipts_from_url_async.py
@@ -14,7 +14,7 @@
using a pre-trained receipt model.
See fields found on a receipt here:
- https://aka.ms/formanalyzer/receiptfields
+ https://aka.ms/azsdk/formrecognizer/receiptfieldschema
USAGE:
python sample_analyze_receipts_from_url_async.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_business_cards.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_business_cards.py
index c5b0f3f49903..811fa10858f0 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_business_cards.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_business_cards.py
@@ -13,7 +13,7 @@
This sample demonstrates how to analyze business cards.
See fields found on a business card here:
- https://aka.ms/formrecognizer/businesscardfields
+ https://aka.ms/azsdk/formrecognizer/businesscardfieldschema
USAGE:
python sample_analyze_business_cards.py
@@ -61,7 +61,7 @@ def analyze_business_card():
contact_name.value["FirstName"].value,
contact_name.value[
"FirstName"
- ].confidence, # TODO confidence is None
+ ].confidence,
)
)
print(
@@ -69,7 +69,7 @@ def analyze_business_card():
contact_name.value["LastName"].value,
contact_name.value[
"LastName"
- ].confidence, # TODO confidence is None
+ ].confidence,
)
)
company_names = business_card.fields.get("CompanyNames")
@@ -125,7 +125,7 @@ def analyze_business_card():
"Mobile phone number: {} has confidence: {}".format(
phone.content, phone.confidence
)
- ) # TODO value not getting populated
+ )
faxes = business_card.fields.get("Faxes")
if faxes:
for fax in faxes.value:
@@ -133,7 +133,7 @@ def analyze_business_card():
"Fax number: {} has confidence: {}".format(
fax.content, fax.confidence
)
- ) # TODO value not getting populated
+ )
work_phones = business_card.fields.get("WorkPhones")
if work_phones:
for work_phone in work_phones.value:
@@ -141,7 +141,7 @@ def analyze_business_card():
"Work phone number: {} has confidence: {}".format(
work_phone.content, work_phone.confidence
)
- ) # TODO value not getting populated
+ )
other_phones = business_card.fields.get("OtherPhones")
if other_phones:
for other_phone in other_phones.value:
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_identity_documents.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_identity_documents.py
index b645bfcb2c27..794f8d915c1a 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_identity_documents.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_identity_documents.py
@@ -13,7 +13,7 @@
This sample demonstrates how to analyze an identity document.
See fields found on identity documents here:
- https://aka.ms/formrecognizer/iddocumentfields
+ https://aka.ms/azsdk/formrecognizer/iddocumentfieldschema
USAGE:
python sample_analyze_identity_documents.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_invoices.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_invoices.py
index 514e12bb7d7a..40a3eefef5bc 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_invoices.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_invoices.py
@@ -13,7 +13,7 @@
This sample demonstrates how to analyze invoices.
See fields found on a invoice here:
- https://aka.ms/formrecognizer/invoicefields
+ https://aka.ms/azsdk/formrecognizer/invoicefieldschema
USAGE:
python sample_analyze_invoices.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_document.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py
similarity index 89%
rename from sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_document.py
rename to sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py
index 21f96b83e607..e7ab3f134fab 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_document.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_prebuilt_document.py
@@ -7,18 +7,14 @@
# --------------------------------------------------------------------------
"""
-FILE: sample_analyze_document.py
+FILE: sample_analyze_prebuilt_document.py
DESCRIPTION:
This sample demonstrates how to extract general document information from a document
given through a file.
- Note that selection marks returned from begin_analyze_document() do not return the text associated with
- the checkbox. For the API to return this information, build a custom model to analyze the checkbox and its text.
- See sample_build_model.py for more information.
-
USAGE:
- python sample_analyze_document.py
+ python sample_analyze_prebuilt_document.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
@@ -63,12 +59,10 @@ def analyze_document():
)
result = poller.result()
- for idx, style in enumerate(result.styles):
- print(
- "Document contains {} content".format(
- "handwritten" if style.is_handwritten else "no handwritten"
- )
- )
+ for style in result.styles:
+ if style.is_handwritten:
+ print("Document contains handwritten content: ")
+ print(",".join([result.content[span.offset:span.offset + span.length] for span in style.spans]))
for page in result.pages:
print("----Analyzing document from page #{}----".format(page.page_number))
@@ -127,7 +121,7 @@ def analyze_document():
)
for region in cell.bounding_regions:
print(
- "...content on page {} is within bounding box '{}'".format(
+ "...content on page {} is within bounding box '{}'\n".format(
region.page_number,
format_bounding_box(region.bounding_box),
)
@@ -138,7 +132,7 @@ def analyze_document():
print("Entity of category '{}' with sub-category '{}'".format(entity.category, entity.sub_category))
print("...has content '{}'".format(entity.content))
print("...within '{}' bounding regions".format(format_bounding_region(entity.bounding_regions)))
- print("...with confidence {}".format(entity.confidence))
+ print("...with confidence {}\n".format(entity.confidence))
print("----Key-value pairs found in document----")
for kv_pair in result.key_value_pairs:
@@ -151,7 +145,7 @@ def analyze_document():
)
if kv_pair.value:
print(
- "Value '{}' found within '{}' bounding regions".format(
+ "Value '{}' found within '{}' bounding regions\n".format(
kv_pair.value.content,
format_bounding_region(kv_pair.value.bounding_regions),
)
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts.py
index 57aec6734763..bd9f45559ede 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts.py
@@ -14,7 +14,7 @@
using a pre-trained receipt model.
See fields found on a receipt here:
- https://aka.ms/formrecognizer/receiptfields
+ https://aka.ms/azsdk/formrecognizer/receiptfieldschema
USAGE:
python sample_analyze_receipts.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts_from_url.py
index c56da0efb187..b4c300a5dc37 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts_from_url.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_analyze_receipts_from_url.py
@@ -14,7 +14,7 @@
using a pre-trained receipt model.
See fields found on a receipt here:
- https://aka.ms/formanalyzer/receiptfields
+ https://aka.ms/azsdk/formrecognizer/receiptfieldschema
USAGE:
python sample_analyze_receipts_from_url.py
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt.test_receipt_locale_error.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt.test_receipt_locale_error.yaml
index 27d57ee0e053..dc3ad49cd758 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt.test_receipt_locale_error.yaml
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt.test_receipt_locale_error.yaml
@@ -12,31 +12,31 @@ interactions:
Content-Length:
- '154512'
Content-Type:
- - image/jpeg
+ - application/octet-stream
User-Agent:
- - azsdk-python-ai-formrecognizer/3.2.0b1 Python/3.9.1 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-ai-formrecognizer/3.2.0b1 Python/3.9.0 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://region.api.cognitive.microsoft.com/formrecognizer/v2.1/prebuilt/receipt/analyze?includeTextDetails=false&locale=not%20a%20locale
+ uri: https://region.api.cognitive.microsoft.com/formrecognizer/documentModels/prebuilt-receipt:analyze?locale=not%20a%20locale&stringIndexType=unicodeCodePoint&api-version=2021-09-30-preview
response:
body:
- string: '{"error": {"code": "UnsupportedLocale", "innerError": {"requestId":
- "c6836b61-db64-4355-8b25-e1948f564d44"}, "message": "Locale unsupported. Supported
- locales include en-AU, en-CA, en-GB, en-IN and en-US."}}'
+ string: '{"error": {"code": "InvalidArgument", "message": "Invalid argument.",
+ "innererror": {"code": "InvalidParameter", "message": "The parameter Locale
+ is invalid: The language code is invalid or not supported."}}}'
headers:
apim-request-id:
- - c6836b61-db64-4355-8b25-e1948f564d44
- content-length:
- - '200'
+ - edf133b9-e46e-458e-a38e-3a148a6ed1f2
content-type:
- application/json; charset=utf-8
date:
- - Mon, 30 Aug 2021 23:48:59 GMT
+ - Thu, 30 Sep 2021 17:06:02 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '113'
+ - '135'
status:
code: 400
message: Bad Request
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_async.test_receipt_locale_error.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_async.test_receipt_locale_error.yaml
index 64cdcdc65abf..93331c0b1c52 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_async.test_receipt_locale_error.yaml
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_async.test_receipt_locale_error.yaml
@@ -6,26 +6,26 @@ interactions:
Accept:
- application/json
Content-Type:
- - image/jpeg
+ - application/octet-stream
User-Agent:
- - azsdk-python-ai-formrecognizer/3.1.0 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-ai-formrecognizer/3.2.0b1 Python/3.9.0 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://region.api.cognitive.microsoft.com/formrecognizer/v2.1/prebuilt/receipt/analyze?includeTextDetails=false&locale=not%20a%20locale
+ uri: https://region.api.cognitive.microsoft.com/formrecognizer/documentModels/prebuilt-receipt:analyze?locale=not%20a%20locale&stringIndexType=unicodeCodePoint&api-version=2021-09-30-preview
response:
body:
- string: '{"error": {"code": "UnsupportedLocale", "innerError": {"requestId":
- "4c27b38d-d0e4-4b41-b76a-0422a7b924b9"}, "message": "Locale unsupported. Supported
- locales include en-AU, en-CA, en-GB, en-IN and en-US."}}'
+ string: '{"error": {"code": "InvalidArgument", "message": "Invalid argument.",
+ "innererror": {"code": "InvalidParameter", "message": "The parameter Locale
+ is invalid: The language code is invalid or not supported."}}}'
headers:
- apim-request-id: 4c27b38d-d0e4-4b41-b76a-0422a7b924b9
- content-length: '200'
+ apim-request-id: 8f6bc0ec-dfbe-4d03-895f-b0cf4bec6192
content-type: application/json; charset=utf-8
- date: Tue, 11 May 2021 03:08:08 GMT
+ date: Thu, 30 Sep 2021 17:06:19 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
+ transfer-encoding: chunked
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '134'
+ x-envoy-upstream-service-time: '69'
status:
code: 400
message: Bad Request
- url: https://region.api.cognitive.microsoft.com/formrecognizer/v2.1/prebuilt/receipt/analyze?includeTextDetails=false&locale=not%20a%20locale
+ url: https://region.api.cognitive.microsoft.com/formrecognizer/documentModels/prebuilt-receipt:analyze?locale=not%20a%20locale&stringIndexType=unicodeCodePoint&api-version=2021-09-30-preview
version: 1
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url.test_receipt_locale_error.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url.test_receipt_locale_error.yaml
index 8ed6afb3dcc7..36a7cee92635 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url.test_receipt_locale_error.yaml
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url.test_receipt_locale_error.yaml
@@ -1,6 +1,6 @@
interactions:
- request:
- body: 'b''{"source": "blob_sas_url"}'''
+ body: 'b''{"urlSource": "blob_sas_url"}'''
headers:
Accept:
- application/json
@@ -9,33 +9,33 @@ interactions:
Connection:
- keep-alive
Content-Length:
- - '221'
+ - '224'
Content-Type:
- application/json
User-Agent:
- - azsdk-python-ai-formrecognizer/3.1.0 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-ai-formrecognizer/3.2.0b1 Python/3.9.0 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://region.api.cognitive.microsoft.com/formrecognizer/v2.1/prebuilt/receipt/analyze?includeTextDetails=false&locale=not%20a%20locale
+ uri: https://region.api.cognitive.microsoft.com/formrecognizer/documentModels/prebuilt-receipt:analyze?locale=not%20a%20locale&stringIndexType=unicodeCodePoint&api-version=2021-09-30-preview
response:
body:
- string: '{"error": {"code": "UnsupportedLocale", "innerError": {"requestId":
- "04a5530c-7246-4327-9661-6579f4ac252b"}, "message": "Locale unsupported. Supported
- locales include en-AU, en-CA, en-GB, en-IN and en-US."}}'
+ string: '{"error": {"code": "InvalidArgument", "message": "Invalid argument.",
+ "innererror": {"code": "InvalidParameter", "message": "The parameter Locale
+ is invalid: The language code is invalid or not supported."}}}'
headers:
apim-request-id:
- - 04a5530c-7246-4327-9661-6579f4ac252b
- content-length:
- - '200'
+ - 37eba60a-f330-4c8f-94cf-8747ba3c2fc5
content-type:
- application/json; charset=utf-8
date:
- - Tue, 11 May 2021 03:09:31 GMT
+ - Thu, 30 Sep 2021 17:05:04 GMT
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
x-content-type-options:
- nosniff
x-envoy-upstream-service-time:
- - '289'
+ - '564'
status:
code: 400
message: Bad Request
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url_async.test_receipt_locale_error.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url_async.test_receipt_locale_error.yaml
index 1fbd527eb3f5..7da46986f3da 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url_async.test_receipt_locale_error.yaml
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_receipt_from_url_async.test_receipt_locale_error.yaml
@@ -1,32 +1,32 @@
interactions:
- request:
- body: 'b''{"source": "blob_sas_url"}'''
+ body: 'b''{"urlSource": "blob_sas_url"}'''
headers:
Accept:
- application/json
Content-Length:
- - '221'
+ - '224'
Content-Type:
- application/json
User-Agent:
- - azsdk-python-ai-formrecognizer/3.1.0 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-ai-formrecognizer/3.2.0b1 Python/3.9.0 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://region.api.cognitive.microsoft.com/formrecognizer/v2.1/prebuilt/receipt/analyze?includeTextDetails=false&locale=not%20a%20locale
+ uri: https://region.api.cognitive.microsoft.com/formrecognizer/documentModels/prebuilt-receipt:analyze?locale=not%20a%20locale&stringIndexType=unicodeCodePoint&api-version=2021-09-30-preview
response:
body:
- string: '{"error": {"code": "UnsupportedLocale", "innerError": {"requestId":
- "c72b100e-f260-4bd3-874f-3354e1b08e83"}, "message": "Locale unsupported. Supported
- locales include en-AU, en-CA, en-GB, en-IN and en-US."}}'
+ string: '{"error": {"code": "InvalidArgument", "message": "Invalid argument.",
+ "innererror": {"code": "InvalidParameter", "message": "The parameter Locale
+ is invalid: The language code is invalid or not supported."}}}'
headers:
- apim-request-id: c72b100e-f260-4bd3-874f-3354e1b08e83
- content-length: '200'
+ apim-request-id: e6c4d122-9c19-4bfb-9d69-f5914cfc29b4
content-type: application/json; charset=utf-8
- date: Tue, 11 May 2021 03:12:51 GMT
+ date: Thu, 30 Sep 2021 17:06:34 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
+ transfer-encoding: chunked
x-content-type-options: nosniff
- x-envoy-upstream-service-time: '897'
+ x-envoy-upstream-service-time: '581'
status:
code: 400
message: Bad Request
- url: https://region.api.cognitive.microsoft.com/formrecognizer/v2.1/prebuilt/receipt/analyze?includeTextDetails=false&locale=not%20a%20locale
+ url: https://region.api.cognitive.microsoft.com/formrecognizer/documentModels/prebuilt-receipt:analyze?locale=not%20a%20locale&stringIndexType=unicodeCodePoint&api-version=2021-09-30-preview
version: 1
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging.py
index 0f298dd2a1a5..081b719554c8 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging.py
@@ -13,7 +13,7 @@
except ImportError: # python < 3.3
import mock # type: ignore
-from azure.ai.formrecognizer import FormRecognizerClient, FormTrainingClient
+from azure.ai.formrecognizer import DocumentAnalysisClient, DocumentModelAdministrationClient
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from testcase import FormRecognizerTest
@@ -27,20 +27,20 @@ def __init__(self):
def emit(self, record):
self.messages.append(record)
-@pytest.mark.skip
+
class TestLogging(FormRecognizerTest):
@FormRecognizerPreparer()
@pytest.mark.live_test_only
- def test_logging_info_fr_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
- client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
+ def test_logging_info_dac_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
+ client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
mock_handler = MockHandler()
logger = logging.getLogger("azure")
logger.addHandler(mock_handler)
logger.setLevel(logging.INFO)
- poller = client.begin_recognize_invoices_from_url(self.receipt_url_jpg)
+ poller = client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
result = poller.result()
for message in mock_handler.messages:
@@ -53,15 +53,15 @@ def test_logging_info_fr_client(self, formrecognizer_test_endpoint, formrecogniz
@FormRecognizerPreparer()
@pytest.mark.live_test_only
- def test_logging_info_ft_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
- client = FormTrainingClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
+ def test_logging_info_dmac_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
+ client = DocumentModelAdministrationClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
mock_handler = MockHandler()
logger = logging.getLogger("azure")
logger.addHandler(mock_handler)
logger.setLevel(logging.INFO)
- result = client.get_account_properties()
+ result = client.get_account_info()
for message in mock_handler.messages:
if message.levelname == "INFO":
@@ -86,10 +86,10 @@ def test_mock_quota_exceeded_403(self, formrecognizer_test_endpoint, formrecogni
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
- client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
+ client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
with pytest.raises(HttpResponseError) as e:
- poller = client.begin_recognize_receipts_from_url(self.receipt_url_jpg)
+ poller = client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
assert e.value.status_code == 403
assert e.value.error.message == 'Out of call volume quota for FormRecognizer F0 pricing tier. Please retry after 1 day. To increase your call volume switch to a paid tier.'
@@ -108,9 +108,9 @@ def test_mock_quota_exceeded_429(self, formrecognizer_test_endpoint, formrecogni
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
- client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
+ client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
with pytest.raises(HttpResponseError) as e:
- poller = client.begin_recognize_receipts_from_url(self.receipt_url_jpg)
+ poller = client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
assert e.value.status_code == 429
assert e.value.error.message == 'Out of call volume quota for FormRecognizer F0 pricing tier. Please retry after 1 day. To increase your call volume switch to a paid tier.'
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging_async.py
index 5d0a0474bbe1..774e53b255f5 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_logging_async.py
@@ -15,7 +15,7 @@
from unittest import mock
except ImportError: # python < 3.3
import mock # type: ignore
-from azure.ai.formrecognizer.aio import FormRecognizerClient, FormTrainingClient
+from azure.ai.formrecognizer.aio import DocumentAnalysisClient, DocumentModelAdministrationClient
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from preparers import FormRecognizerPreparer
@@ -62,20 +62,20 @@ def __init__(self, *args, **kwargs):
self.__aenter__ = mock.Mock(return_value=get_completed_future())
self.__aexit__ = mock.Mock(return_value=get_completed_future())
-@pytest.mark.skip
+
class TestLogging(AsyncFormRecognizerTest):
@FormRecognizerPreparer()
@pytest.mark.live_test_only
- async def test_logging_info_fr_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
- client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
+ async def test_logging_info_dac_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
+ client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
mock_handler = MockHandler()
logger = logging.getLogger("azure")
logger.addHandler(mock_handler)
logger.setLevel(logging.INFO)
async with client:
- poller = await client.begin_recognize_invoices_from_url(self.receipt_url_jpg)
+ poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
result = await poller.result()
for message in mock_handler.messages:
@@ -88,15 +88,15 @@ async def test_logging_info_fr_client(self, formrecognizer_test_endpoint, formre
@FormRecognizerPreparer()
@pytest.mark.live_test_only
- async def test_logging_info_ft_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
- client = FormTrainingClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
+ async def test_logging_info_dmac_client(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
+ client = DocumentModelAdministrationClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
mock_handler = MockHandler()
logger = logging.getLogger("azure")
logger.addHandler(mock_handler)
logger.setLevel(logging.INFO)
async with client:
- result = await client.get_account_properties()
+ result = await client.get_account_info()
for message in mock_handler.messages:
if message.levelname == "INFO":
@@ -121,10 +121,10 @@ async def test_mock_quota_exceeded_403(self, formrecognizer_test_endpoint, formr
response.content_type = "application/json"
transport = AsyncMockTransport(send=wrap_in_future(lambda request, **kwargs: response))
- client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
+ client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
with pytest.raises(HttpResponseError) as e:
- poller = await client.begin_recognize_receipts_from_url(self.receipt_url_jpg)
+ poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
assert e.value.status_code == 403
assert e.value.error.message == 'Out of call volume quota for FormRecognizer F0 pricing tier. Please retry after 1 day. To increase your call volume switch to a paid tier.'
@@ -142,8 +142,8 @@ async def test_mock_quota_exceeded_429(self, formrecognizer_test_endpoint, formr
response.content_type = "application/json"
transport = AsyncMockTransport(send=wrap_in_future(lambda request, **kwargs: response))
- client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
+ client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), transport=transport)
with pytest.raises(HttpResponseError) as e:
- poller = await client.begin_recognize_receipts_from_url(self.receipt_url_jpg)
+ poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
assert e.value.status_code == 429
- assert e.value.error.message == 'Out of call volume quota for FormRecognizer F0 pricing tier. Please retry after 1 day. To increase your call volume switch to a paid tier.'
\ No newline at end of file
+ assert e.value.error.message == 'Out of call volume quota for FormRecognizer F0 pricing tier. Please retry after 1 day. To increase your call volume switch to a paid tier.'
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py
index dbd32752e8f6..cbb417410f23 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py
@@ -27,7 +27,6 @@
class TestManagement(FormRecognizerTest):
- @pytest.mark.skip("aad not working in canary")
@FormRecognizerPreparer()
@pytest.mark.live_test_only
def test_active_directory_auth(self):
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py
index c56f1d12c1a9..e4c6301567de 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py
@@ -29,7 +29,6 @@
class TestManagementAsync(AsyncFormRecognizerTest):
- @pytest.mark.skip("no aad yet in private preview")
@FormRecognizerPreparer()
@pytest.mark.live_test_only
async def test_active_directory_auth_async(self):
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py
index 68e399005f9b..16888d669c22 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py
@@ -388,13 +388,12 @@ def test_receipt_locale_specified(self, client):
@FormRecognizerPreparer()
@GlobalClientPreparer()
- @pytest.mark.skip("the service is returning a different error code")
def test_receipt_locale_error(self, client):
with open(self.receipt_jpg, "rb") as fd:
receipt = fd.read()
with pytest.raises(HttpResponseError) as e:
client.begin_analyze_document("prebuilt-receipt", receipt, locale="not a locale")
- assert "UnsupportedLocale" == e.value.error.code
+ assert "InvalidArgument" == e.value.error.code
@FormRecognizerPreparer()
@GlobalClientPreparerV2(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py
index 538432c44bcf..e097572866c2 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py
@@ -400,14 +400,13 @@ async def test_receipt_locale_specified(self, client):
@FormRecognizerPreparer()
@GlobalClientPreparer()
- @pytest.mark.skip("the service is returning a different error code")
async def test_receipt_locale_error(self, client):
with open(self.receipt_jpg, "rb") as fd:
receipt = fd.read()
with pytest.raises(HttpResponseError) as e:
async with client:
await client.begin_analyze_document("prebuilt-receipt", receipt, locale="not a locale")
- assert "UnsupportedLocale" == e.value.error.code
+ assert "InvalidArgument" == e.value.error.code
@FormRecognizerPreparer()
@GlobalClientPreparerV2(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py
index da50faad515b..072f11882508 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py
@@ -35,7 +35,6 @@ def test_polling_interval(self, formrecognizer_test_endpoint, formrecognizer_tes
self.assertEqual(poller2._polling_method._timeout, 7) # goes back to client default
@pytest.mark.live_test_only
- @pytest.mark.skip("aad not enabled yet in v2021-07-30")
def test_active_directory_auth(self):
token = self.generate_oauth_token()
endpoint = self.get_oauth_endpoint()
@@ -300,11 +299,10 @@ def test_receipt_locale_specified(self, client):
@FormRecognizerPreparer()
@GlobalClientPreparer()
- @pytest.mark.skip("different error code being returned")
def test_receipt_locale_error(self, client):
with pytest.raises(HttpResponseError) as e:
client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg, locale="not a locale")
- assert "UnsupportedLocale" == e.value.error.code
+ assert "InvalidArgument" == e.value.error.code
@FormRecognizerPreparer()
@GlobalClientPreparerV2(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py
index 14737014a0f0..c26ec7acb7cc 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py
@@ -39,7 +39,6 @@ async def test_polling_interval(self, formrecognizer_test_endpoint, formrecogniz
@pytest.mark.live_test_only
@FormRecognizerPreparer()
- @pytest.mark.skip("aad not enabled yet in v2021-07-30")
async def test_active_directory_auth_async(self):
token = self.generate_oauth_token()
endpoint = self.get_oauth_endpoint()
@@ -333,12 +332,11 @@ async def test_receipt_locale_specified(self, client):
@FormRecognizerPreparer()
@GlobalClientPreparer()
- @pytest.mark.skip("different error code being returned")
async def test_receipt_locale_error(self, client):
with pytest.raises(HttpResponseError) as e:
async with client:
await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg, locale="not a locale")
- assert "UnsupportedLocale" == e.value.error.code
+ assert "InvalidArgument" == e.value.error.code
@FormRecognizerPreparer()
@GlobalClientPreparerV2(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
diff --git a/sdk/identity/azure-identity/CHANGELOG.md b/sdk/identity/azure-identity/CHANGELOG.md
index 49c726f3f0a2..64847d7e5a20 100644
--- a/sdk/identity/azure-identity/CHANGELOG.md
+++ b/sdk/identity/azure-identity/CHANGELOG.md
@@ -2,13 +2,15 @@
## 1.7.0b5 (Unreleased)
-### Features Added
-
### Breaking Changes
+> These changes do not impact the API of stable versions such as 1.6.0.
+> Only code written against a beta version such as 1.7.0b1 may be affected.
-### Bugs Fixed
-
-### Other Changes
+- The `allow_multitenant_authentication` argument has been removed and the default behavior is now as if it were true.
+ The multitenant authentication feature can be totally disabled by setting the environment variable
+ `AZURE_IDENTITY_DISABLE_MULTITENANTAUTH` to `True`.
+- `azure.identity.RegionalAuthority` is removed.
+- `regional_authority` argument is removed for `CertificateCredential` and `ClientSecretCredential`
## 1.7.0b4 (2021-09-09)
diff --git a/sdk/identity/azure-identity/README.md b/sdk/identity/azure-identity/README.md
index 0f3c12139e16..6306917ccd5b 100644
--- a/sdk/identity/azure-identity/README.md
+++ b/sdk/identity/azure-identity/README.md
@@ -9,6 +9,10 @@ This library does not support Azure Active Directory B2C.
| [API reference documentation][ref_docs]
| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/)
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Install the package
diff --git a/sdk/identity/azure-identity/azure/identity/__init__.py b/sdk/identity/azure-identity/azure/identity/__init__.py
index 4d7dff365695..0969ad2e0504 100644
--- a/sdk/identity/azure-identity/azure/identity/__init__.py
+++ b/sdk/identity/azure-identity/azure/identity/__init__.py
@@ -5,7 +5,6 @@
"""Credentials for Azure SDK clients."""
from ._auth_record import AuthenticationRecord
-from ._enums import RegionalAuthority
from ._exceptions import AuthenticationRequiredError, CredentialUnavailableError
from ._constants import AzureAuthorityHosts, KnownAuthorities
from ._credentials import (
@@ -47,7 +46,6 @@
"InteractiveBrowserCredential",
"KnownAuthorities",
"OnBehalfOfCredential",
- "RegionalAuthority",
"ManagedIdentityCredential",
"SharedTokenCacheCredential",
"TokenCachePersistenceOptions",
diff --git a/sdk/identity/azure-identity/azure/identity/_constants.py b/sdk/identity/azure-identity/azure/identity/_constants.py
index 878d7f6bce7f..4cf9fb2d9287 100644
--- a/sdk/identity/azure-identity/azure/identity/_constants.py
+++ b/sdk/identity/azure-identity/azure/identity/_constants.py
@@ -44,7 +44,7 @@ class EnvironmentVariables:
MSI_SECRET = "MSI_SECRET"
AZURE_AUTHORITY_HOST = "AZURE_AUTHORITY_HOST"
- AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION = "AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION"
+ AZURE_IDENTITY_DISABLE_MULTITENANTAUTH = "AZURE_IDENTITY_DISABLE_MULTITENANTAUTH"
AZURE_REGIONAL_AUTHORITY_NAME = "AZURE_REGIONAL_AUTHORITY_NAME"
AZURE_FEDERATED_TOKEN_FILE = "AZURE_FEDERATED_TOKEN_FILE"
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/application.py b/sdk/identity/azure-identity/azure/identity/_credentials/application.py
index 46a84e13eb7b..abb22fbaabac 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/application.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/application.py
@@ -48,10 +48,6 @@ class AzureApplicationCredential(ChainedTokenCredential):
`_ for an overview of
managed identities.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application or user is registered in. When False, which is the default, the credential will acquire tokens
- only from the tenant specified by **AZURE_TENANT_ID**. This argument doesn't apply to managed identity
- authentication.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com",
the authority for Azure Public Cloud, which is the default when no value is given for this keyword argument or
environment variable AZURE_AUTHORITY_HOST. :class:`~azure.identity.AzureAuthorityHosts` defines authorities for
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py b/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py
index 587547640744..7eae087b837c 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/authorization_code.py
@@ -30,9 +30,6 @@ class AuthorizationCodeCredential(GetTokenMixin):
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
:keyword str client_secret: One of the application's client secrets. Required only for web apps and web APIs.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or the tenant specified by **tenant_id**.
"""
def __init__(self, tenant_id, client_id, authorization_code, redirect_uri, **kwargs):
@@ -67,8 +64,7 @@ def get_token(self, *scopes, **kwargs):
redeeming the authorization code.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py b/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py
index d535a286adb7..a94e001852ec 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/azure_cli.py
@@ -35,15 +35,8 @@ class AzureCliCredential(object):
"""Authenticates by requesting a token from the Azure CLI.
This requires previously logging in to Azure via "az login", and will use the CLI's currently logged in identity.
-
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the identity logged in to the Azure CLI is registered in. When False, which is the default, the credential will
- acquire tokens only from the tenant of the Azure CLI's active subscription.
"""
- def __init__(self, **kwargs):
- self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False)
-
def __enter__(self):
return self
@@ -55,7 +48,7 @@ def close(self):
"""Calling this method is unnecessary."""
@log_get_token("AzureCliCredential")
- def get_token(self, *scopes, **kwargs):
+ def get_token(self, *scopes, **kwargs): # pylint: disable=no-self-use
# type: (*str, **Any) -> AccessToken
"""Request an access token for `scopes`.
@@ -63,8 +56,7 @@ def get_token(self, *scopes, **kwargs):
also handle token caching because this credential doesn't cache the tokens it acquires.
:param str scopes: desired scope for the access token. This credential allows only one scope per request.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
@@ -75,7 +67,7 @@ def get_token(self, *scopes, **kwargs):
resource = _scopes_to_resource(*scopes)
command = COMMAND_LINE.format(resource)
- tenant = resolve_tenant("", self._allow_multitenant, **kwargs)
+ tenant = resolve_tenant("", **kwargs)
if tenant:
command += " --tenant " + tenant
output = _run_command(command)
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py b/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py
index 17869fbde253..78c34b81d76e 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/azure_powershell.py
@@ -51,16 +51,8 @@ class AzurePowerShellCredential(object):
"""Authenticates by requesting a token from Azure PowerShell.
This requires previously logging in to Azure via "Connect-AzAccount", and will use the currently logged in identity.
-
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the identity logged in to Azure PowerShell is registered in. When False, which is the default, the credential
- will acquire tokens only from the tenant of Azure PowerShell's active subscription.
"""
- def __init__(self, **kwargs):
- # type: (**Any) -> None
- self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False)
-
def __enter__(self):
return self
@@ -72,7 +64,7 @@ def close(self):
"""Calling this method is unnecessary."""
@log_get_token("AzurePowerShellCredential")
- def get_token(self, *scopes, **kwargs):
+ def get_token(self, *scopes, **kwargs): # pylint: disable=no-self-use
# type: (*str, **Any) -> AccessToken
"""Request an access token for `scopes`.
@@ -80,8 +72,7 @@ def get_token(self, *scopes, **kwargs):
also handle token caching because this credential doesn't cache the tokens it acquires.
:param str scopes: desired scope for the access token. This credential allows only one scope per request.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
@@ -90,7 +81,7 @@ def get_token(self, *scopes, **kwargs):
:raises ~azure.core.exceptions.ClientAuthenticationError: the credential invoked Azure PowerShell but didn't
receive an access token
"""
- tenant_id = resolve_tenant("", self._allow_multitenant, **kwargs)
+ tenant_id = resolve_tenant("", **kwargs)
command_line = get_command_line(scopes, tenant_id)
output = run_command_line(command_line)
token = parse_token(output)
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/browser.py b/sdk/identity/azure-identity/azure/identity/_credentials/browser.py
index 6aead5b26f47..5b624046a7ac 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/browser.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/browser.py
@@ -51,9 +51,6 @@ class InteractiveBrowserCredential(InteractiveCredential):
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
:keyword int timeout: seconds to wait for the user to complete authentication. Defaults to 300 (5 minutes).
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or the tenant specified by **tenant_id**.
:raises ValueError: invalid **redirect_uri**
"""
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py b/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py
index cdb999d41898..1169fb178942 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py
@@ -39,18 +39,12 @@ class CertificateCredential(ClientCredentialBase):
:keyword password: The certificate's password. If a unicode string, it will be encoded as UTF-8. If the certificate
requires a different encoding, pass appropriately encoded bytes instead.
:paramtype password: str or bytes
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only from
- the tenant specified by **tenant_id**.
:keyword bool send_certificate_chain: if True, the credential will send the public certificate chain in the x5c
header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. Defaults to
False.
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword ~azure.identity.RegionalAuthority regional_authority: a :class:`~azure.identity.RegionalAuthority` to
- which the credential will authenticate. This argument should be used only by applications deployed to Azure
- VMs.
"""
def __init__(self, tenant_id, client_id, certificate_path=None, **kwargs):
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py b/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py
index 9623b0ef8b1d..4b68e401a023 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py
@@ -21,15 +21,9 @@ class ClientSecretCredential(ClientCredentialBase):
:keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com",
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only from
- the tenant specified by **tenant_id**.
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword ~azure.identity.RegionalAuthority regional_authority: a :class:`~azure.identity.RegionalAuthority` to
- which the credential will authenticate. This argument should be used only by applications deployed to Azure
- VMs.
"""
def __init__(self, tenant_id, client_id, client_secret, **kwargs):
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/default.py b/sdk/identity/azure-identity/azure/identity/_credentials/default.py
index 90ddc1b39b53..75c2111362bb 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/default.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/default.py
@@ -47,9 +47,6 @@ class DefaultAzureCredential(ChainedTokenCredential):
This default behavior is configurable with keyword arguments.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only from
- its configured tenant. This argument doesn't apply to managed identity authentication.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds. Managed identities ignore this because they reside in a single cloud.
@@ -136,9 +133,9 @@ def __init__(self, **kwargs):
if not exclude_visual_studio_code_credential:
credentials.append(VisualStudioCodeCredential(**vscode_args))
if not exclude_cli_credential:
- credentials.append(AzureCliCredential(**kwargs))
+ credentials.append(AzureCliCredential())
if not exclude_powershell_credential:
- credentials.append(AzurePowerShellCredential(**kwargs))
+ credentials.append(AzurePowerShellCredential())
if not exclude_interactive_browser_credential:
if interactive_browser_client_id:
credentials.append(
@@ -158,8 +155,7 @@ def get_token(self, *scopes, **kwargs):
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py b/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py
index 657bd5eb5568..e5af7b89c8d1 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py
@@ -55,9 +55,6 @@ class DeviceCodeCredential(InteractiveCredential):
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or the tenant specified by **tenant_id**.
"""
def __init__(self, client_id=DEVELOPER_SIGN_ON_CLIENT_ID, **kwargs):
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/environment.py b/sdk/identity/azure-identity/azure/identity/_credentials/environment.py
index 8d0e7401d8b2..d2b134d11e9e 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/environment.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/environment.py
@@ -52,10 +52,6 @@ class EnvironmentCredential(object):
- **AZURE_TENANT_ID**: (optional) ID of the service principal's tenant. Also called its 'directory' ID.
If not provided, defaults to the 'organizations' tenant, which supports only Azure Active Directory work or
school accounts.
-
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application or user is registered in. When False, which is the default, the credential will acquire tokens
- only from the tenant specified by **AZURE_TENANT_ID**.
"""
def __init__(self, **kwargs):
@@ -123,8 +119,7 @@ def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py b/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py
index bc39c7a475e1..70f3c407ca16 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/on_behalf_of.py
@@ -39,9 +39,6 @@ class OnBehalfOfCredential(MsalCredential, GetTokenMixin):
:param str user_assertion: the access token the credential will use as the user assertion when requesting
on-behalf-of tokens
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only
- from the tenant specified by **tenant_id**.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com",
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py b/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py
index 906aaad174fc..3cef36e80507 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/shared_cache.py
@@ -34,10 +34,6 @@ class SharedTokenCacheCredential(object):
:keyword cache_persistence_options: configuration for persistent token caching. If not provided, the credential
will use the persistent cache shared by Microsoft development applications
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or, if a value was given for **authentication_record**, the tenant specified by the
- :class:`AuthenticationRecord`.
"""
def __init__(self, username=None, **kwargs):
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/silent.py b/sdk/identity/azure-identity/azure/identity/_credentials/silent.py
index b1aa1ade8c46..08e0611fa5e4 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/silent.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/silent.py
@@ -35,7 +35,6 @@ def __init__(self, authentication_record, **kwargs):
# authenticate in the tenant that produced the record unless "tenant_id" specifies another
self._tenant_id = kwargs.pop("tenant_id", None) or self._auth_record.tenant_id
validate_tenant_id(self._tenant_id)
- self._allow_multitenant = kwargs.pop("allow_multitenant_authentication", False)
self._cache = kwargs.pop("_cache", None)
self._client_applications = {} # type: Dict[str, PublicClientApplication]
self._client = MsalClient(**kwargs)
@@ -74,7 +73,7 @@ def _initialize(self):
self._initialized = True
def _get_client_application(self, **kwargs):
- tenant_id = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs)
+ tenant_id = resolve_tenant(self._tenant_id, **kwargs)
if tenant_id not in self._client_applications:
# CP1 = can handle claims challenges (CAE)
capabilities = None if "AZURE_IDENTITY_DISABLE_CP1" in os.environ else ["CP1"]
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py b/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py
index 77281a185e6e..0521e8fa42d6 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py
@@ -37,9 +37,6 @@ class UsernamePasswordCredential(InteractiveCredential):
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or the tenant specified by **tenant_id**.
"""
def __init__(self, client_id, username, password, **kwargs):
diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py b/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py
index cd6866f319da..a66dc9d234a9 100644
--- a/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py
+++ b/sdk/identity/azure-identity/azure/identity/_credentials/vscode.py
@@ -120,9 +120,6 @@ class VisualStudioCodeCredential(_VSCodeCredentialBase, GetTokenMixin):
:keyword str tenant_id: ID of the tenant the credential should authenticate in. Defaults to the "Azure: Tenant"
setting in VS Code's user settings or, when that setting has no value, the "organizations" tenant, which
supports only Azure Active Directory work or school accounts.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or the tenant configured by **tenant_id** or VS Code's user settings.
"""
def __enter__(self):
diff --git a/sdk/identity/azure-identity/azure/identity/_internal/__init__.py b/sdk/identity/azure-identity/azure/identity/_internal/__init__.py
index a1799b0679a8..d9c7203734a5 100644
--- a/sdk/identity/azure-identity/azure/identity/_internal/__init__.py
+++ b/sdk/identity/azure-identity/azure/identity/_internal/__init__.py
@@ -7,8 +7,6 @@
from six.moves.urllib_parse import urlparse
-from azure.core.exceptions import ClientAuthenticationError
-
from .._constants import EnvironmentVariables, KnownAuthorities
if TYPE_CHECKING:
@@ -66,21 +64,16 @@ def validate_tenant_id(tenant_id):
)
-def resolve_tenant(default_tenant, allow_multitenant, tenant_id=None, **_):
- # type: (str, bool, Optional[str], **Any) -> str
+def resolve_tenant(default_tenant, tenant_id=None, **_):
+ # type: (str, Optional[str], **Any) -> str
"""Returns the correct tenant for a token request given a credential's configuration"""
if (
tenant_id is None
- or tenant_id == default_tenant
- or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION)
+ or default_tenant == "adfs"
+ or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH)
):
return default_tenant
- if not allow_multitenant:
- raise ClientAuthenticationError(
- 'The specified tenant for this token request, "{}", does not match'.format(tenant_id)
- + ' the configured tenant, and "allow_multitenant_authentication" is False.'
- )
return tenant_id
diff --git a/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py b/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py
index b6a44e8d5681..f33cf40ce227 100644
--- a/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py
+++ b/sdk/identity/azure-identity/azure/identity/_internal/aad_client_base.py
@@ -47,13 +47,12 @@ class AadClientBase(ABC):
_POST = ["POST"]
def __init__(
- self, tenant_id, client_id, authority=None, cache=None, allow_multitenant_authentication=False, **kwargs
+ self, tenant_id, client_id, authority=None, cache=None, **kwargs
):
- # type: (str, str, Optional[str], Optional[TokenCache], bool, **Any) -> None
+ # type: (str, str, Optional[str], Optional[TokenCache], **Any) -> None
self._authority = normalize_authority(authority) if authority else get_default_authority()
self._tenant_id = tenant_id
- self._allow_multitenant = allow_multitenant_authentication
self._cache = cache or TokenCache()
self._client_id = client_id
@@ -61,7 +60,7 @@ def __init__(
def get_cached_access_token(self, scopes, **kwargs):
# type: (Iterable[str], **Any) -> Optional[AccessToken]
- tenant = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs)
+ tenant = resolve_tenant(self._tenant_id, **kwargs)
tokens = self._cache.find(
TokenCache.CredentialType.ACCESS_TOKEN,
target=list(scopes),
@@ -260,7 +259,7 @@ def _get_refresh_token_request(self, scopes, refresh_token, **kwargs):
def _get_token_url(self, **kwargs):
# type: (**Any) -> str
- tenant = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs)
+ tenant = resolve_tenant(self._tenant_id, **kwargs)
return "/".join((self._authority, tenant, "oauth2/v2.0/token"))
def _post(self, data, **kwargs):
diff --git a/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py b/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py
index d8b683e984bd..29f30bb1fb8a 100644
--- a/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py
+++ b/sdk/identity/azure-identity/azure/identity/_internal/get_token_mixin.py
@@ -57,8 +57,7 @@ def get_token(self, *scopes, **kwargs):
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/_internal/interactive.py b/sdk/identity/azure-identity/azure/identity/_internal/interactive.py
index a095c783c595..e448e6d90a9c 100644
--- a/sdk/identity/azure-identity/azure/identity/_internal/interactive.py
+++ b/sdk/identity/azure-identity/azure/identity/_internal/interactive.py
@@ -109,8 +109,7 @@ def get_token(self, *scopes, **kwargs):
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:keyword str claims: additional claims required in the token, such as those returned in a resource provider's
claims challenge following an authorization failure
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py b/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py
index 8ac10bbd687d..4ab266ae26c3 100644
--- a/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py
+++ b/sdk/identity/azure-identity/azure/identity/_internal/msal_credentials.py
@@ -28,13 +28,9 @@ def __init__(self, client_id, client_credential=None, **kwargs):
# type: (str, Optional[Union[str, Dict]], **Any) -> None
authority = kwargs.pop("authority", None)
self._authority = normalize_authority(authority) if authority else get_default_authority()
- self._regional_authority = kwargs.pop(
- "regional_authority", os.environ.get(EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME)
- )
+ self._regional_authority = os.environ.get(EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME)
self._tenant_id = kwargs.pop("tenant_id", None) or "organizations"
validate_tenant_id(self._tenant_id)
- self._allow_multitenant = kwargs.pop("allow_multitenant_authentication", False)
-
self._client = MsalClient(**kwargs)
self._client_applications = {} # type: Dict[str, msal.ClientApplication]
self._client_credential = client_credential
@@ -63,7 +59,7 @@ def close(self):
def _get_app(self, **kwargs):
# type: (**Any) -> msal.ClientApplication
- tenant_id = resolve_tenant(self._tenant_id, self._allow_multitenant, **kwargs)
+ tenant_id = resolve_tenant(self._tenant_id, **kwargs)
if tenant_id not in self._client_applications:
# CP1 = can handle claims challenges (CAE)
capabilities = None if "AZURE_IDENTITY_DISABLE_CP1" in os.environ else ["CP1"]
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py
index 7e63bc9b78db..c7812ea69587 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/application.py
@@ -48,10 +48,6 @@ class AzureApplicationCredential(ChainedTokenCredential):
`_ for an overview of
managed identities.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application or user is registered in. When False, which is the default, the credential will acquire tokens
- only from the tenant specified by **AZURE_TENANT_ID**. This argument doesn't apply to managed identity
- authentication.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com",
the authority for Azure Public Cloud, which is the default when no value is given for this keyword argument or
environment variable AZURE_AUTHORITY_HOST. :class:`~azure.identity.AzureAuthorityHosts` defines authorities for
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py
index 225fbe434d94..4befed6e9eeb 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/authorization_code.py
@@ -30,9 +30,6 @@ class AuthorizationCodeCredential(AsyncContextManager, GetTokenMixin):
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
:keyword str client_secret: One of the application's client secrets. Required only for web apps and web APIs.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or the tenant specified by **tenant_id**.
"""
async def __aenter__(self):
@@ -66,8 +63,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
redeeming the authorization code.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py
index 944a2211d023..869cf5de69ae 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_cli.py
@@ -31,15 +31,8 @@ class AzureCliCredential(AsyncContextManager):
"""Authenticates by requesting a token from the Azure CLI.
This requires previously logging in to Azure via "az login", and will use the CLI's currently logged in identity.
-
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the identity logged in to the Azure CLI is registered in. When False, which is the default, the credential will
- acquire tokens only from the tenant of the Azure CLI's active subscription.
"""
- def __init__(self, **kwargs: "Any") -> None:
- self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False)
-
@log_get_token_async
async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
"""Request an access token for `scopes`.
@@ -48,8 +41,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
also handle token caching because this credential doesn't cache the tokens it acquires.
:param str scopes: desired scope for the access token. This credential allows only one scope per request.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
@@ -63,7 +55,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
resource = _scopes_to_resource(*scopes)
command = COMMAND_LINE.format(resource)
- tenant = resolve_tenant("", self._allow_multitenant, **kwargs)
+ tenant = resolve_tenant("", **kwargs)
if tenant:
command += " --tenant " + tenant
output = await _run_command(command)
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py
index cfb3cd4331a1..0881dd4e2b99 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/azure_powershell.py
@@ -28,15 +28,8 @@ class AzurePowerShellCredential(AsyncContextManager):
"""Authenticates by requesting a token from Azure PowerShell.
This requires previously logging in to Azure via "Connect-AzAccount", and will use the currently logged in identity.
-
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the identity logged in to Azure PowerShell is registered in. When False, which is the default, the credential
- will acquire tokens only from the tenant of Azure PowerShell's active subscription.
"""
- def __init__(self, **kwargs: "Any") -> None:
- self._allow_multitenant = kwargs.get("allow_multitenant_authentication", False)
-
@log_get_token_async
async def get_token(
self, *scopes: str, **kwargs: "Any"
@@ -47,8 +40,7 @@ async def get_token(
also handle token caching because this credential doesn't cache the tokens it acquires.
:param str scopes: desired scope for the access token. This credential allows only one scope per request.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
@@ -61,7 +53,7 @@ async def get_token(
if sys.platform.startswith("win") and not isinstance(asyncio.get_event_loop(), asyncio.ProactorEventLoop):
return _SyncCredential().get_token(*scopes, **kwargs)
- tenant_id = resolve_tenant("", self._allow_multitenant, **kwargs)
+ tenant_id = resolve_tenant("", **kwargs)
command_line = get_command_line(scopes, tenant_id)
output = await run_command_line(command_line)
token = parse_token(output)
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py
index a78b9b790eac..0957e6a151c6 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py
@@ -40,9 +40,6 @@ class CertificateCredential(AsyncContextManager, GetTokenMixin):
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only from
- the tenant specified by **tenant_id**.
"""
def __init__(self, tenant_id, client_id, certificate_path=None, **kwargs):
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py
index 676e0b15e790..4bcfa49cbd19 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py
@@ -29,9 +29,6 @@ class ClientSecretCredential(AsyncContextManager, GetTokenMixin):
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only from
- the tenant specified by **tenant_id**.
"""
def __init__(self, tenant_id: str, client_id: str, client_secret: str, **kwargs: "Any") -> None:
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py
index 8888e5d28874..5b32b0429c54 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py
@@ -42,9 +42,6 @@ class DefaultAzureCredential(ChainedTokenCredential):
This default behavior is configurable with keyword arguments.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only from
- its configured tenant. This argument doesn't apply to managed identity authentication.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds. Managed identities ignore this because they reside in a single cloud.
@@ -121,9 +118,9 @@ def __init__(self, **kwargs: "Any") -> None:
if not exclude_visual_studio_code_credential:
credentials.append(VisualStudioCodeCredential(**vscode_args))
if not exclude_cli_credential:
- credentials.append(AzureCliCredential(**kwargs))
+ credentials.append(AzureCliCredential())
if not exclude_powershell_credential:
- credentials.append(AzurePowerShellCredential(**kwargs))
+ credentials.append(AzurePowerShellCredential())
super().__init__(*credentials)
@@ -133,8 +130,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py
index d4c0bdff2047..944add051c94 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py
@@ -37,10 +37,6 @@ class EnvironmentCredential(AsyncContextManager):
- **AZURE_CLIENT_ID**: the service principal's client ID
- **AZURE_CLIENT_CERTIFICATE_PATH**: path to a PEM or PKCS12 certificate file including the private key. The
certificate must not be password-protected.
-
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application or user is registered in. When False, which is the default, the credential will acquire tokens
- only from the tenant specified by **AZURE_TENANT_ID**.
"""
def __init__(self, **kwargs: "Any") -> None:
@@ -91,8 +87,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py
index bc7c75f2123a..202de061d3ae 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/on_behalf_of.py
@@ -36,9 +36,6 @@ class OnBehalfOfCredential(AsyncContextManager, GetTokenMixin):
:param str user_assertion: the access token the credential will use as the user assertion when requesting
on-behalf-of tokens
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the application is registered in. When False, which is the default, the credential will acquire tokens only
- from the tenant specified by **tenant_id**.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com",
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
@@ -74,7 +71,7 @@ def __init__(
else:
self._client_credential = client_credential
- # note AadClient handles "allow_multitenant_authentication", "authority", and any pipeline kwargs
+ # note AadClient handles "authority" and any pipeline kwargs
self._client = AadClient(tenant_id, client_id, **kwargs)
self._assertion = user_assertion
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py
index b663f16623af..1852b8523586 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/shared_cache.py
@@ -32,9 +32,6 @@ class SharedTokenCacheCredential(SharedTokenCacheBase, AsyncContextManager):
:keyword cache_persistence_options: configuration for persistent token caching. If not provided, the credential
will use the persistent cache shared by Microsoft development applications
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant.
"""
async def __aenter__(self):
@@ -57,8 +54,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": # py
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py
index 586354f8ff30..c66e1e1c2611 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/vscode.py
@@ -26,9 +26,6 @@ class VisualStudioCodeCredential(_VSCodeCredentialBase, AsyncContextManager, Get
:keyword str tenant_id: ID of the tenant the credential should authenticate in. Defaults to the "Azure: Tenant"
setting in VS Code's user settings or, when that setting has no value, the "organizations" tenant, which
supports only Azure Active Directory work or school accounts.
- :keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
- the user is registered in. When False, which is the default, the credential will acquire tokens only from the
- user's home tenant or the tenant configured by **tenant_id** or VS Code's user settings.
"""
async def __aenter__(self) -> "VisualStudioCodeCredential":
@@ -48,8 +45,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py b/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py
index 17b8d225b55d..f41db52d4132 100644
--- a/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py
+++ b/sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py
@@ -47,8 +47,7 @@ async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
- :keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
- is False, specifying a tenant with this argument may raise an exception.
+ :keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
diff --git a/sdk/identity/azure-identity/setup.py b/sdk/identity/azure-identity/setup.py
index 9dcd57da02a4..6ffc1683458d 100644
--- a/sdk/identity/azure-identity/setup.py
+++ b/sdk/identity/azure-identity/setup.py
@@ -61,6 +61,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
diff --git a/sdk/identity/azure-identity/tests/test_aad_client.py b/sdk/identity/azure-identity/tests/test_aad_client.py
index 68a452c1e287..3bee135d4268 100644
--- a/sdk/identity/azure-identity/tests/test_aad_client.py
+++ b/sdk/identity/azure-identity/tests/test_aad_client.py
@@ -304,11 +304,7 @@ def test_multitenant_cache():
assert client_b.get_cached_access_token([scope]) is None
# but C allows multitenant auth and should therefore return the token from tenant_a when appropriate
- client_c = AadClient(tenant_id=tenant_c, allow_multitenant_authentication=True, **common_args)
+ client_c = AadClient(tenant_id=tenant_c, **common_args)
assert client_c.get_cached_access_token([scope]) is None
token = client_c.get_cached_access_token([scope], tenant_id=tenant_a)
assert token.token == expected_token
- with patch.dict(
- "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True
- ):
- assert client_c.get_cached_access_token([scope], tenant_id=tenant_a) is None
diff --git a/sdk/identity/azure-identity/tests/test_aad_client_async.py b/sdk/identity/azure-identity/tests/test_aad_client_async.py
index dba17bc11cf6..ab30393f93d3 100644
--- a/sdk/identity/azure-identity/tests/test_aad_client_async.py
+++ b/sdk/identity/azure-identity/tests/test_aad_client_async.py
@@ -308,11 +308,7 @@ async def test_multitenant_cache():
assert client_b.get_cached_access_token([scope]) is None
# but C allows multitenant auth and should therefore return the token from tenant_a when appropriate
- client_c = AadClient(tenant_id=tenant_c, allow_multitenant_authentication=True, **common_args)
+ client_c = AadClient(tenant_id=tenant_c, **common_args)
assert client_c.get_cached_access_token([scope]) is None
token = client_c.get_cached_access_token([scope], tenant_id=tenant_a)
assert token.token == expected_token
- with patch.dict(
- "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True
- ):
- assert client_c.get_cached_access_token([scope], tenant_id=tenant_a) is None
diff --git a/sdk/identity/azure-identity/tests/test_auth_code.py b/sdk/identity/azure-identity/tests/test_auth_code.py
index 29ab3733a633..f2fe752ff528 100644
--- a/sdk/identity/azure-identity/tests/test_auth_code.py
+++ b/sdk/identity/azure-identity/tests/test_auth_code.py
@@ -118,9 +118,7 @@ def test_auth_code_credential():
assert transport.send.call_count == 2
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -138,7 +136,6 @@ def send(request, **_):
"client-id",
"authcode",
"https://localhost",
- allow_multitenant_authentication=True,
transport=Mock(send=send),
)
token = credential.get_token("scope")
@@ -154,10 +151,7 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == first_token
-
def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -174,15 +168,12 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="un" + expected_tenant)
+ token = credential.get_token("scope", tenant_id="un" + expected_tenant)
+ assert token.token == expected_token * 2
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_auth_code_async.py b/sdk/identity/azure-identity/tests/test_auth_code_async.py
index 5eb55e6515cc..e5a91ca62750 100644
--- a/sdk/identity/azure-identity/tests/test_auth_code_async.py
+++ b/sdk/identity/azure-identity/tests/test_auth_code_async.py
@@ -142,9 +142,7 @@ async def test_auth_code_credential():
assert transport.send.call_count == 2
-async def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -162,7 +160,6 @@ async def send(request, **_):
"client-id",
"authcode",
"https://localhost",
- allow_multitenant_authentication=True,
transport=Mock(send=send),
)
token = await credential.get_token("scope")
@@ -178,10 +175,7 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == first_token
-
async def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -198,15 +192,12 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = await credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- await credential.get_token("scope", tenant_id="un" + expected_tenant)
+ token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
+ assert token.token == expected_token * 2
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_certificate_credential.py b/sdk/identity/azure-identity/tests/test_certificate_credential.py
index 389bf336297c..9215882abb3a 100644
--- a/sdk/identity/azure-identity/tests/test_certificate_credential.py
+++ b/sdk/identity/azure-identity/tests/test_certificate_credential.py
@@ -7,7 +7,8 @@
from azure.core.exceptions import ClientAuthenticationError
from azure.core.pipeline.policies import ContentDecodePolicy, SansIOHTTPPolicy
-from azure.identity import CertificateCredential, RegionalAuthority, TokenCachePersistenceOptions
+from azure.identity import CertificateCredential, TokenCachePersistenceOptions
+from azure.identity._enums import RegionalAuthority
from azure.identity._constants import EnvironmentVariables
from azure.identity._credentials.certificate import load_pkcs12_certificate
from azure.identity._internal.user_agent import USER_AGENT
@@ -151,17 +152,6 @@ def test_regional_authority():
for region in RegionalAuthority:
mock_confidential_client.reset_mock()
- with patch.dict("os.environ", {}, clear=True):
- credential = CertificateCredential("tenant", "client-id", PEM_CERT_PATH, regional_authority=region)
- with patch("msal.ConfidentialClientApplication", mock_confidential_client):
- # must call get_token because the credential constructs the MSAL application lazily
- credential.get_token("scope")
-
- assert mock_confidential_client.call_count == 1
- _, kwargs = mock_confidential_client.call_args
- assert kwargs["azure_region"] == region
- mock_confidential_client.reset_mock()
-
# region can be configured via environment variable
with patch.dict("os.environ", {EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME: region}, clear=True):
credential = CertificateCredential("tenant", "client-id", PEM_CERT_PATH)
@@ -359,9 +349,7 @@ def test_certificate_arguments():
@pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS)
-def test_allow_multitenant_authentication(cert_path, cert_password):
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication(cert_path, cert_password):
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -382,7 +370,6 @@ def send(request, **_):
"client-id",
cert_path,
password=cert_password,
- allow_multitenant_authentication=True,
transport=Mock(send=send),
)
token = credential.get_token("scope")
@@ -401,8 +388,6 @@ def send(request, **_):
@pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS)
def test_multitenant_authentication_backcompat(cert_path, cert_password):
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -426,13 +411,5 @@ def send(request, **_):
token = credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="un" + expected_tenant)
-
- # ...unless the compat switch is enabled
- with patch.dict(
- os.environ, {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True
- ):
- token = credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ token = credential.get_token("scope", tenant_id="un" + expected_tenant)
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_certificate_credential_async.py b/sdk/identity/azure-identity/tests/test_certificate_credential_async.py
index a2338e78a910..8c5cc5b67efa 100644
--- a/sdk/identity/azure-identity/tests/test_certificate_credential_async.py
+++ b/sdk/identity/azure-identity/tests/test_certificate_credential_async.py
@@ -270,9 +270,7 @@ def test_certificate_arguments():
@pytest.mark.asyncio
@pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS)
-async def test_allow_multitenant_authentication(cert_path, cert_password):
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication(cert_path, cert_password):
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -290,7 +288,6 @@ async def send(request, **_):
"client-id",
cert_path,
password=cert_password,
- allow_multitenant_authentication=True,
transport=Mock(send=send),
)
token = await credential.get_token("scope")
@@ -310,8 +307,6 @@ async def send(request, **_):
@pytest.mark.asyncio
@pytest.mark.parametrize("cert_path,cert_password", ALL_CERTS)
async def test_multitenant_authentication_backcompat(cert_path, cert_password):
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -332,13 +327,5 @@ async def send(request, **_):
token = await credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- await credential.get_token("scope", tenant_id="un" + expected_tenant)
-
- # ...unless the compat switch is enabled
- with patch.dict(
- "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True
- ):
- token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
+ assert token.token == expected_token * 2
diff --git a/sdk/identity/azure-identity/tests/test_cli_credential.py b/sdk/identity/azure-identity/tests/test_cli_credential.py
index bac97fd4ac7c..6eb71b97e722 100644
--- a/sdk/identity/azure-identity/tests/test_cli_credential.py
+++ b/sdk/identity/azure-identity/tests/test_cli_credential.py
@@ -152,9 +152,7 @@ def test_timeout():
AzureCliCredential().get_token("scope")
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
default_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -174,7 +172,7 @@ def fake_check_output(command_line, **_):
}
)
- credential = AzureCliCredential(allow_multitenant_authentication=True)
+ credential = AzureCliCredential()
with mock.patch(CHECK_OUTPUT, fake_check_output):
token = credential.get_token("scope")
assert token.token == first_token
@@ -189,10 +187,7 @@ def fake_check_output(command_line, **_):
token = credential.get_token("scope")
assert token.token == first_token
-
def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -214,15 +209,8 @@ def fake_check_output(command_line, **_):
token = credential.get_token("scope")
assert token.token == expected_token
- # specifying a tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="un" + expected_tenant)
-
- # ...unless the compat switch is enabled
with mock.patch.dict(
- "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}
+ "os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}
):
token = credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert (
- token.token == expected_token
- ), "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_cli_credential_async.py b/sdk/identity/azure-identity/tests/test_cli_credential_async.py
index d5f5885f5d1f..2276a1bff3e5 100644
--- a/sdk/identity/azure-identity/tests/test_cli_credential_async.py
+++ b/sdk/identity/azure-identity/tests/test_cli_credential_async.py
@@ -185,9 +185,7 @@ async def test_timeout():
assert proc.kill.call_count == 1
-async def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication():
default_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -208,7 +206,7 @@ async def fake_exec(*args, **_):
).encode()
return mock.Mock(communicate=mock.Mock(return_value=get_completed_future((output, b""))), returncode=0)
- credential = AzureCliCredential(allow_multitenant_authentication=True)
+ credential = AzureCliCredential()
with mock.patch(SUBPROCESS_EXEC, fake_exec):
token = await credential.get_token("scope")
assert token.token == first_token
@@ -223,10 +221,7 @@ async def fake_exec(*args, **_):
token = await credential.get_token("scope")
assert token.token == first_token
-
async def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -249,13 +244,6 @@ async def fake_exec(*args, **_):
token = await credential.get_token("scope")
assert token.token == expected_token
- # specifying a tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- await credential.get_token("scope", tenant_id="un" + expected_tenant)
-
- # ...unless the compat switch is enabled
- with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert (
- token.token == expected_token
- ), "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_client_secret_credential.py b/sdk/identity/azure-identity/tests/test_client_secret_credential.py
index 7c694c617c60..2528bf0ad191 100644
--- a/sdk/identity/azure-identity/tests/test_client_secret_credential.py
+++ b/sdk/identity/azure-identity/tests/test_client_secret_credential.py
@@ -4,7 +4,8 @@
# ------------------------------------
from azure.core.exceptions import ClientAuthenticationError
from azure.core.pipeline.policies import ContentDecodePolicy, SansIOHTTPPolicy
-from azure.identity import ClientSecretCredential, RegionalAuthority, TokenCachePersistenceOptions
+from azure.identity import ClientSecretCredential, TokenCachePersistenceOptions
+from azure.identity._enums import RegionalAuthority
from azure.identity._constants import EnvironmentVariables
from azure.identity._internal.user_agent import USER_AGENT
from msal import TokenCache
@@ -128,17 +129,6 @@ def test_regional_authority():
for region in RegionalAuthority:
mock_confidential_client.reset_mock()
- with patch.dict("os.environ", {}, clear=True):
- credential = ClientSecretCredential("tenant", "client-id", "secret", regional_authority=region)
- with patch("msal.ConfidentialClientApplication", mock_confidential_client):
- # must call get_token because the credential constructs the MSAL application lazily
- credential.get_token("scope")
-
- assert mock_confidential_client.call_count == 1
- _, kwargs = mock_confidential_client.call_args
- assert kwargs["azure_region"] == region
- mock_confidential_client.reset_mock()
-
# region can be configured via environment variable
with patch.dict("os.environ", {EnvironmentVariables.AZURE_REGIONAL_AUTHORITY_NAME: region}, clear=True):
credential = ClientSecretCredential("tenant", "client-id", "secret")
@@ -211,9 +201,7 @@ def test_cache_multiple_clients():
assert len(cache.find(TokenCache.CredentialType.ACCESS_TOKEN)) == 2
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -230,7 +218,7 @@ def send(request, **_):
return mock_response(json_payload=build_aad_response(access_token=token))
credential = ClientSecretCredential(
- first_tenant, "client-id", "secret", allow_multitenant_authentication=True, transport=Mock(send=send)
+ first_tenant, "client-id", "secret", transport=Mock(send=send)
)
token = credential.get_token("scope")
assert token.token == first_token
@@ -245,10 +233,7 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == first_token
-
def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -266,15 +251,9 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="un" + expected_tenant)
-
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py b/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py
index 60554ce9c90b..03e8d323c81d 100644
--- a/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py
+++ b/sdk/identity/azure-identity/tests/test_client_secret_credential_async.py
@@ -251,9 +251,7 @@ async def test_cache_multiple_clients():
@pytest.mark.asyncio
-async def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -267,7 +265,7 @@ async def send(request, **_):
return mock_response(json_payload=build_aad_response(access_token=token))
credential = ClientSecretCredential(
- first_tenant, "client-id", "secret", allow_multitenant_authentication=True, transport=Mock(send=send)
+ first_tenant, "client-id", "secret", transport=Mock(send=send)
)
token = await credential.get_token("scope")
assert token.token == first_token
@@ -282,11 +280,8 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == first_token
-
@pytest.mark.asyncio
async def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -301,15 +296,12 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = await credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- await credential.get_token("scope", tenant_id="un" + expected_tenant)
+ token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
+ assert token.token == expected_token * 2
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_default.py b/sdk/identity/azure-identity/tests/test_default.py
index 8c8189996861..1c9120a9942a 100644
--- a/sdk/identity/azure-identity/tests/test_default.py
+++ b/sdk/identity/azure-identity/tests/test_default.py
@@ -402,36 +402,6 @@ def validate_client_id(credential):
validate_client_id(mock_credential)
-@pytest.mark.parametrize("expected_value", (True, False))
-def test_allow_multitenant_authentication(expected_value):
- """the credential should pass "allow_multitenant_authentication" to the inner credentials which support it"""
-
- inner_credentials = {
- credential: Mock()
- for credential in (
- "AzureCliCredential",
- "AzurePowerShellCredential",
- "EnvironmentCredential",
- "InteractiveBrowserCredential",
- "ManagedIdentityCredential", # will ignore the argument
- "SharedTokenCacheCredential",
- )
- }
- with patch.multiple(DefaultAzureCredential.__module__, **inner_credentials):
- DefaultAzureCredential(
- allow_multitenant_authentication=expected_value, exclude_interactive_browser_credential=False
- )
-
- for credential_name, mock_credential in inner_credentials.items():
- assert mock_credential.call_count == 1
- _, kwargs = mock_credential.call_args
-
- assert "allow_multitenant_authentication" in kwargs, (
- '"allow_multitenant_authentication" was not passed to ' + credential_name
- )
- assert kwargs["allow_multitenant_authentication"] == expected_value
-
-
def test_unexpected_kwarg():
"""the credential shouldn't raise when given an unexpected keyword argument"""
DefaultAzureCredential(foo=42)
diff --git a/sdk/identity/azure-identity/tests/test_default_async.py b/sdk/identity/azure-identity/tests/test_default_async.py
index 0f144350640c..e4ff1a9fcf11 100644
--- a/sdk/identity/azure-identity/tests/test_default_async.py
+++ b/sdk/identity/azure-identity/tests/test_default_async.py
@@ -312,33 +312,6 @@ def get_credential_for_shared_cache_test(expected_refresh_token, expected_access
return DefaultAzureCredential(_cache=cache, transport=transport, **exclude_other_credentials, **kwargs)
-@pytest.mark.parametrize("expected_value", (True, False))
-def test_allow_multitenant_authentication(expected_value):
- """the credential should pass "allow_multitenant_authentication" to the inner credentials which support it"""
-
- inner_credentials = {
- credential: Mock()
- for credential in (
- "AzureCliCredential",
- "AzurePowerShellCredential",
- "EnvironmentCredential",
- "ManagedIdentityCredential", # will ignore the argument
- "SharedTokenCacheCredential",
- )
- }
- with patch.multiple(DefaultAzureCredential.__module__, **inner_credentials):
- DefaultAzureCredential(allow_multitenant_authentication=expected_value)
-
- for credential_name, mock_credential in inner_credentials.items():
- assert mock_credential.call_count == 1
- _, kwargs = mock_credential.call_args
-
- assert "allow_multitenant_authentication" in kwargs, (
- '"allow_multitenant_authentication" was not passed to ' + credential_name
- )
- assert kwargs["allow_multitenant_authentication"] == expected_value
-
-
def test_unexpected_kwarg():
"""the credential shouldn't raise when given an unexpected keyword argument"""
DefaultAzureCredential(foo=42)
diff --git a/sdk/identity/azure-identity/tests/test_interactive_credential.py b/sdk/identity/azure-identity/tests/test_interactive_credential.py
index e82f76f6c5ba..ba6e407e3aec 100644
--- a/sdk/identity/azure-identity/tests/test_interactive_credential.py
+++ b/sdk/identity/azure-identity/tests/test_interactive_credential.py
@@ -282,9 +282,7 @@ def _request_token(self, *_, **__):
assert record.username == username
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -312,7 +310,6 @@ def send(request, **_):
credential = MockCredential(
tenant_id=first_tenant,
- allow_multitenant_authentication=True,
request_token=request_token,
transport=Mock(send=send),
)
@@ -329,10 +326,7 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == first_token
-
def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -360,15 +354,9 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="un" + expected_tenant)
-
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_obo.py b/sdk/identity/azure-identity/tests/test_obo.py
index 7cd402ee8c36..413b149be398 100644
--- a/sdk/identity/azure-identity/tests/test_obo.py
+++ b/sdk/identity/azure-identity/tests/test_obo.py
@@ -92,9 +92,7 @@ def test_obo_cert(self):
credential.get_token(self.obo_settings["scope"])
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -113,7 +111,7 @@ def send(request, **_):
transport = Mock(send=Mock(wraps=send))
credential = OnBehalfOfCredential(
- first_tenant, "client-id", "secret", "assertion", allow_multitenant_authentication=True, transport=transport
+ first_tenant, "client-id", "secret", "assertion", transport=transport
)
token = credential.get_token("scope")
assert token.token == first_token
diff --git a/sdk/identity/azure-identity/tests/test_obo_async.py b/sdk/identity/azure-identity/tests/test_obo_async.py
index 0bbaecb79150..c39957be0afd 100644
--- a/sdk/identity/azure-identity/tests/test_obo_async.py
+++ b/sdk/identity/azure-identity/tests/test_obo_async.py
@@ -69,9 +69,7 @@ async def test_context_manager():
@pytest.mark.asyncio
-async def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -87,7 +85,7 @@ async def send(request, **_):
transport = Mock(send=Mock(wraps=send))
credential = OnBehalfOfCredential(
- first_tenant, "client-id", "secret", "assertion", allow_multitenant_authentication=True, transport=transport
+ first_tenant, "client-id", "secret", "assertion", transport=transport
)
token = await credential.get_token("scope")
assert token.token == first_token
diff --git a/sdk/identity/azure-identity/tests/test_powershell_credential.py b/sdk/identity/azure-identity/tests/test_powershell_credential.py
index cdf83543f220..3766b84cb6e8 100644
--- a/sdk/identity/azure-identity/tests/test_powershell_credential.py
+++ b/sdk/identity/azure-identity/tests/test_powershell_credential.py
@@ -243,9 +243,7 @@ def Popen(args, **kwargs):
assert Fake.calls == 2
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
first_token = "***"
second_tenant = "second-tenant"
second_token = first_token * 2
@@ -264,7 +262,7 @@ def fake_Popen(command, **_):
communicate = Mock(return_value=(stdout, ""))
return Mock(communicate=communicate, returncode=0)
- credential = AzurePowerShellCredential(allow_multitenant_authentication=True)
+ credential = AzurePowerShellCredential()
with patch(POPEN, fake_Popen):
token = credential.get_token("scope")
assert token.token == first_token
@@ -276,10 +274,7 @@ def fake_Popen(command, **_):
token = credential.get_token("scope")
assert token.token == first_token
-
def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_token = "***"
def fake_Popen(command, **_):
@@ -300,13 +295,6 @@ def fake_Popen(command, **_):
token = credential.get_token("scope")
assert token.token == expected_token
- # specifying a tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="some tenant")
-
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = credential.get_token("scope", tenant_id="some tenant")
- assert (
- token.token == expected_token
- ), "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_powershell_credential_async.py b/sdk/identity/azure-identity/tests/test_powershell_credential_async.py
index 2e67d6c19906..0dcc56267f78 100644
--- a/sdk/identity/azure-identity/tests/test_powershell_credential_async.py
+++ b/sdk/identity/azure-identity/tests/test_powershell_credential_async.py
@@ -244,9 +244,7 @@ async def mock_exec(*args, **kwargs):
assert calls == 2
-async def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication():
first_token = "***"
second_tenant = "second-tenant"
second_token = first_token * 2
@@ -266,7 +264,7 @@ async def fake_exec(*args, **_):
communicate = Mock(return_value=get_completed_future((stdout.encode(), b"")))
return Mock(communicate=communicate, returncode=0)
- credential = AzurePowerShellCredential(allow_multitenant_authentication=True)
+ credential = AzurePowerShellCredential()
with patch(CREATE_SUBPROCESS_EXEC, fake_exec):
token = await credential.get_token("scope")
assert token.token == first_token
@@ -278,10 +276,7 @@ async def fake_exec(*args, **_):
token = await credential.get_token("scope")
assert token.token == first_token
-
async def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_token = "***"
async def fake_exec(*args, **_):
@@ -302,13 +297,6 @@ async def fake_exec(*args, **_):
token = await credential.get_token("scope")
assert token.token == expected_token
- # specifying a tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- await credential.get_token("scope", tenant_id="some tenant")
-
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = await credential.get_token("scope", tenant_id="some tenant")
- assert (
- token.token == expected_token
- ), "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_shared_cache_credential.py b/sdk/identity/azure-identity/tests/test_shared_cache_credential.py
index 5081825cebb5..287815c00cce 100644
--- a/sdk/identity/azure-identity/tests/test_shared_cache_credential.py
+++ b/sdk/identity/azure-identity/tests/test_shared_cache_credential.py
@@ -825,9 +825,7 @@ def test_claims_challenge():
assert kwargs["claims_challenge"] == expected_claims
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
default_tenant = "organizations"
first_token = "***"
second_tenant = "second-tenant"
@@ -851,7 +849,7 @@ def send(request, **_):
cache = populated_cache(expected_account)
credential = SharedTokenCacheCredential(
- allow_multitenant_authentication=True, authority=authority, transport=Mock(send=send), _cache=cache
+ authority=authority, transport=Mock(send=send), _cache=cache
)
token = credential.get_token("scope")
assert token.token == first_token
@@ -867,56 +865,7 @@ def send(request, **_):
assert token.token == first_token
-def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
- default_tenant = "organizations"
- expected_token = "***"
-
- def send(request, **_):
- parsed = urlparse(request.url)
- tenant_id = parsed.path.split("/")[1]
- assert tenant_id == default_tenant
- return mock_response(
- json_payload=build_aad_response(
- access_token=expected_token,
- id_token_claims=id_token_claims(aud="...", iss="...", sub="..."),
- )
- )
-
- tenant_id = "tenant-id"
- client_id = "client-id"
- authority = "localhost"
- object_id = "object-id"
- username = "me"
-
- expected_account = get_account_event(
- username, object_id, tenant_id, authority=authority, client_id=client_id, refresh_token="**"
- )
- cache = populated_cache(expected_account)
-
- credential = SharedTokenCacheCredential(authority=authority, transport=Mock(send=send), _cache=cache)
-
- token = credential.get_token("scope")
- assert token.token == expected_token
-
- # explicitly specifying the configured tenant is okay
- token = credential.get_token("scope", tenant_id=default_tenant)
- assert token.token == expected_token
-
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="some tenant")
-
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
- token = credential.get_token("scope", tenant_id="some tenant")
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
-
-
-def test_allow_multitenant_authentication_auth_record():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication_auth_record():
default_tenant = "organizations"
first_token = "***"
second_tenant = "second-tenant"
@@ -947,7 +896,6 @@ def send(request, **_):
cache = populated_cache(expected_account)
credential = SharedTokenCacheCredential(
- allow_multitenant_authentication=True,
authority=authority,
transport=Mock(send=send),
authentication_record=record,
@@ -967,64 +915,6 @@ def send(request, **_):
assert token.token == first_token
-def test_multitenant_authentication_not_allowed_authentication_record():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
- default_tenant = "organizations"
- expected_token = "***"
-
- authority = AzureAuthorityHosts.AZURE_PUBLIC_CLOUD
- object_id = "object-id"
- home_account_id = object_id + "." + default_tenant
- record = AuthenticationRecord(default_tenant, "client-id", authority, home_account_id, "user")
-
- def send(request, **_):
- parsed = urlparse(request.url)
- tenant_id = parsed.path.split("/")[1]
- if "/oauth2/v2.0/token" not in request.url:
- return get_discovery_response("https://{}/{}".format(parsed.netloc, tenant_id))
-
- assert tenant_id == default_tenant
- return mock_response(
- json_payload=build_aad_response(
- access_token=expected_token,
- id_token_claims=id_token_claims(aud="...", iss="...", sub="..."),
- )
- )
-
- expected_account = get_account_event(
- record.username,
- object_id,
- record.tenant_id,
- authority=record.authority,
- client_id=record.client_id,
- refresh_token="**",
- )
- cache = populated_cache(expected_account)
-
- credential = SharedTokenCacheCredential(
- authority=authority, transport=Mock(send=send), authentication_record=record, _cache=cache
- )
-
- token = credential.get_token("scope")
- assert token.token == expected_token
-
- # explicitly specifying the configured tenant is okay
- token = credential.get_token("scope", tenant_id=default_tenant)
- assert token.token == expected_token
-
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="some tenant")
-
- # ...unless the compat switch is enabled
- with patch.dict(
- "os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}, clear=True
- ):
- token = credential.get_token("scope", tenant_id="some tenant")
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
-
-
def get_account_event(
username, uid, utid, authority=None, client_id="client-id", refresh_token="refresh-token", scopes=None, **kwargs
):
@@ -1054,3 +944,41 @@ def populated_cache(*accounts):
cache.add(account)
cache.add = lambda *_, **__: None # prevent anything being added to the cache
return cache
+
+def test_multitenant_authentication_not_allowed():
+ default_tenant = "organizations"
+ expected_token = "***"
+
+ def send(request, **_):
+ parsed = urlparse(request.url)
+ tenant_id = parsed.path.split("/")[1]
+ assert tenant_id == default_tenant
+ return mock_response(
+ json_payload=build_aad_response(
+ access_token=expected_token,
+ id_token_claims=id_token_claims(aud="...", iss="...", sub="..."),
+ )
+ )
+
+ tenant_id = "tenant-id"
+ client_id = "client-id"
+ authority = "localhost"
+ object_id = "object-id"
+ username = "me"
+
+ expected_account = get_account_event(
+ username, object_id, tenant_id, authority=authority, client_id=client_id, refresh_token="**"
+ )
+ cache = populated_cache(expected_account)
+
+ credential = SharedTokenCacheCredential(authority=authority, transport=Mock(send=send), _cache=cache)
+
+ token = credential.get_token("scope")
+ assert token.token == expected_token
+
+ token = credential.get_token("scope", tenant_id=default_tenant)
+ assert token.token == expected_token
+
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
+ token = credential.get_token("scope", tenant_id="some tenant")
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py b/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py
index a6d7f0d67d60..9346755360f1 100644
--- a/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py
+++ b/sdk/identity/azure-identity/tests/test_shared_cache_credential_async.py
@@ -606,9 +606,7 @@ async def test_initialization():
@pytest.mark.asyncio
-async def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication():
first_token = "***"
second_tenant = "second-tenant"
second_token = first_token * 2
@@ -630,7 +628,7 @@ async def send(request, **_):
cache = populated_cache(expected_account)
credential = SharedTokenCacheCredential(
- allow_multitenant_authentication=True, authority=authority, transport=Mock(send=send), _cache=cache
+ authority=authority, transport=Mock(send=send), _cache=cache
)
token = await credential.get_token("scope")
assert token.token == first_token
@@ -645,11 +643,8 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == first_token
-
@pytest.mark.asyncio
async def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
default_tenant = "organizations"
expected_token = "***"
@@ -675,15 +670,9 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = await credential.get_token("scope", tenant_id=default_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- await credential.get_token("scope", tenant_id="some tenant")
-
- # ...unless the compat switch is enabled
- with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = await credential.get_token("scope", tenant_id="some tenant")
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_vscode_credential.py b/sdk/identity/azure-identity/tests/test_vscode_credential.py
index e6db05f56e5f..befe86ad7ce3 100644
--- a/sdk/identity/azure-identity/tests/test_vscode_credential.py
+++ b/sdk/identity/azure-identity/tests/test_vscode_credential.py
@@ -277,9 +277,7 @@ def test_no_user_settings():
assert transport.send.call_count == 1
-def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -293,7 +291,7 @@ def send(request, **_):
return mock_response(json_payload=build_aad_response(access_token=token))
credential = get_credential(
- tenant_id=first_tenant, allow_multitenant_authentication=True, transport=mock.Mock(send=send)
+ tenant_id=first_tenant, transport=mock.Mock(send=send)
)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
token = credential.get_token("scope")
@@ -310,10 +308,7 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == first_token
-
def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -329,15 +324,12 @@ def send(request, **_):
token = credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- credential.get_token("scope", tenant_id="un" + expected_tenant)
+ token = credential.get_token("scope", tenant_id="un" + expected_tenant)
+ assert token.token == expected_token * 2
- # ...unless the compat switch is enabled
- with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/identity/azure-identity/tests/test_vscode_credential_async.py b/sdk/identity/azure-identity/tests/test_vscode_credential_async.py
index 40c996e39957..ff5716c7d9ad 100644
--- a/sdk/identity/azure-identity/tests/test_vscode_credential_async.py
+++ b/sdk/identity/azure-identity/tests/test_vscode_credential_async.py
@@ -268,9 +268,7 @@ async def test_no_user_settings():
@pytest.mark.asyncio
-async def test_allow_multitenant_authentication():
- """When allow_multitenant_authentication is True, the credential should respect get_token(tenant_id=...)"""
-
+async def test_multitenant_authentication():
first_tenant = "first-tenant"
first_token = "***"
second_tenant = "second-tenant"
@@ -284,7 +282,7 @@ async def send(request, **_):
return mock_response(json_payload=build_aad_response(access_token=token))
credential = get_credential(
- tenant_id=first_tenant, allow_multitenant_authentication=True, transport=mock.Mock(send=send)
+ tenant_id=first_tenant, transport=mock.Mock(send=send)
)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
token = await credential.get_token("scope")
@@ -301,11 +299,8 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == first_token
-
@pytest.mark.asyncio
async def test_multitenant_authentication_not_allowed():
- """get_token(tenant_id=...) should raise when allow_multitenant_authentication is False (the default)"""
-
expected_tenant = "expected-tenant"
expected_token = "***"
@@ -321,15 +316,12 @@ async def send(request, **_):
token = await credential.get_token("scope")
assert token.token == expected_token
- # explicitly specifying the configured tenant is okay
token = await credential.get_token("scope", tenant_id=expected_tenant)
assert token.token == expected_token
- # but any other tenant should get an error
- with pytest.raises(ClientAuthenticationError, match="allow_multitenant_authentication"):
- await credential.get_token("scope", tenant_id="un" + expected_tenant)
+ token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
+ assert token.token == expected_token * 2
- # ...unless the compat switch is enabled
- with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_ENABLE_LEGACY_TENANT_SELECTION: "true"}):
+ with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH: "true"}):
token = await credential.get_token("scope", tenant_id="un" + expected_tenant)
- assert token.token == expected_token, "credential should ignore tenant_id kwarg when the compat switch is enabled"
+ assert token.token == expected_token
diff --git a/sdk/keyvault/azure-keyvault-administration/README.md b/sdk/keyvault/azure-keyvault-administration/README.md
index 6d280c29554c..4f1089e1fdb4 100644
--- a/sdk/keyvault/azure-keyvault-administration/README.md
+++ b/sdk/keyvault/azure-keyvault-administration/README.md
@@ -16,6 +16,10 @@ create, manage, and deploy public and private SSL/TLS certificates
[Package (PyPI)][pypi_package_administration] | [API reference documentation][reference_docs] | [Product documentation][keyvault_docs]
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Install packages
Install [azure-keyvault-administration][pypi_package_administration] and
diff --git a/sdk/keyvault/azure-keyvault-administration/setup.py b/sdk/keyvault/azure-keyvault-administration/setup.py
index 15a86bfbbeaf..0231b074702c 100644
--- a/sdk/keyvault/azure-keyvault-administration/setup.py
+++ b/sdk/keyvault/azure-keyvault-administration/setup.py
@@ -68,6 +68,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
diff --git a/sdk/keyvault/azure-keyvault-certificates/README.md b/sdk/keyvault/azure-keyvault-certificates/README.md
index 1d06d3e37c9a..ed21c6c5cf4a 100644
--- a/sdk/keyvault/azure-keyvault-certificates/README.md
+++ b/sdk/keyvault/azure-keyvault-certificates/README.md
@@ -11,6 +11,10 @@ and other secrets
[Source code][certificates_client_src] | [Package (PyPI)][pypi_package_certificates] | [API reference documentation][reference_docs] | [Product documentation][keyvault_docs] | [Samples][certificates_samples]
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Install the package
Install [azure-keyvault-certificates][pypi_package_certificates] and
diff --git a/sdk/keyvault/azure-keyvault-certificates/setup.py b/sdk/keyvault/azure-keyvault-certificates/setup.py
index 3dfd40f5726a..e9d54c896c34 100644
--- a/sdk/keyvault/azure-keyvault-certificates/setup.py
+++ b/sdk/keyvault/azure-keyvault-certificates/setup.py
@@ -69,6 +69,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
diff --git a/sdk/keyvault/azure-keyvault-keys/CHANGELOG.md b/sdk/keyvault/azure-keyvault-keys/CHANGELOG.md
index 8834bd492fb0..7fcb0514b51b 100644
--- a/sdk/keyvault/azure-keyvault-keys/CHANGELOG.md
+++ b/sdk/keyvault/azure-keyvault-keys/CHANGELOG.md
@@ -3,6 +3,10 @@
## 4.5.0b4 (Unreleased)
### Features Added
+- Added support for automated and on-demand key rotation in Azure Key Vault
+ ([#19840](https://github.com/Azure/azure-sdk-for-python/issues/19840))
+ - Added `KeyClient.rotate_key` to rotate a key on-demand
+ - Added `KeyClient.update_key_rotation_policy` to update a key's automated rotation policy
### Breaking Changes
diff --git a/sdk/keyvault/azure-keyvault-keys/README.md b/sdk/keyvault/azure-keyvault-keys/README.md
index bbe3597447b8..ae86032483d9 100644
--- a/sdk/keyvault/azure-keyvault-keys/README.md
+++ b/sdk/keyvault/azure-keyvault-keys/README.md
@@ -13,6 +13,10 @@ create, manage, and deploy public and private SSL/TLS certificates
[Source code][key_client_src] | [Package (PyPI)][pypi_package_keys] | [API reference documentation][reference_docs] | [Product documentation][keyvault_docs] | [Samples][key_samples]
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Install packages
Install [azure-keyvault-keys][pypi_package_keys] and
diff --git a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/__init__.py b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/__init__.py
index db89d14689a8..ffd2301ad26a 100644
--- a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/__init__.py
+++ b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/__init__.py
@@ -2,13 +2,15 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# -------------------------------------
-from ._enums import KeyCurveName, KeyExportEncryptionAlgorithm, KeyOperation, KeyType
+from ._enums import KeyCurveName, KeyExportEncryptionAlgorithm, KeyOperation, KeyRotationPolicyAction, KeyType
from ._shared.client_base import ApiVersion
from ._models import (
DeletedKey,
JsonWebKey,
KeyProperties,
KeyReleasePolicy,
+ KeyRotationLifetimeAction,
+ KeyRotationPolicy,
KeyVaultKey,
KeyVaultKeyIdentifier,
RandomBytes,
@@ -25,10 +27,13 @@
"KeyCurveName",
"KeyExportEncryptionAlgorithm",
"KeyOperation",
+ "KeyRotationPolicyAction",
"KeyType",
"DeletedKey",
"KeyProperties",
"KeyReleasePolicy",
+ "KeyRotationLifetimeAction",
+ "KeyRotationPolicy",
"RandomBytes",
"ReleaseKeyResult",
]
diff --git a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
index 1e561321b9c2..d9ba659d69e5 100644
--- a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
+++ b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
@@ -8,7 +8,7 @@
from ._shared import KeyVaultClientBase
from ._shared.exceptions import error_map as _error_map
from ._shared._polling import DeleteRecoverPollingMethod, KeyVaultOperationPoller
-from ._models import DeletedKey, KeyVaultKey, KeyProperties, RandomBytes, ReleaseKeyResult
+from ._models import DeletedKey, KeyVaultKey, KeyProperties, KeyRotationPolicy, RandomBytes, ReleaseKeyResult
try:
from typing import TYPE_CHECKING
@@ -17,7 +17,7 @@
if TYPE_CHECKING:
# pylint:disable=unused-import
- from typing import Any, Optional, Union
+ from typing import Any, Iterable, Optional, Union
from azure.core.paging import ItemPaged
from azure.core.polling import LROPoller
from ._models import JsonWebKey
@@ -45,7 +45,7 @@ class KeyClient(KeyVaultClientBase):
:dedent: 4
"""
- # pylint:disable=protected-access
+ # pylint:disable=protected-access, too-many-public-methods
def _get_attributes(self, enabled, not_before, expires_on, exportable=None):
"""Return a KeyAttributes object if none-None attributes are provided, or None otherwise"""
@@ -727,3 +727,70 @@ def get_random_bytes(self, count, **kwargs):
parameters = self._models.GetRandomBytesRequest(count=count)
result = self._client.get_random_bytes(vault_base_url=self._vault_url, parameters=parameters, **kwargs)
return RandomBytes(value=result.value)
+
+ @distributed_trace
+ def get_key_rotation_policy(self, name, **kwargs):
+ # type: (str, **Any) -> KeyRotationPolicy
+ """Get the rotation policy of a Key Vault key.
+
+ :param str name: The name of the key.
+
+ :return: The key rotation policy.
+ :rtype: ~azure.keyvault.keys.KeyRotationPolicy
+        :raises: :class:`~azure.core.exceptions.HttpResponseError`
+ """
+ policy = self._client.get_key_rotation_policy(vault_base_url=self._vault_url, key_name=name, **kwargs)
+ return KeyRotationPolicy._from_generated(policy)
+
+ @distributed_trace
+ def rotate_key(self, name, **kwargs):
+ # type: (str, **Any) -> KeyVaultKey
+ """Rotate the key based on the key policy by generating a new version of the key.
+
+ This operation requires the keys/rotate permission.
+
+ :param str name: The name of the key to rotate.
+
+ :return: The new version of the rotated key.
+ :rtype: ~azure.keyvault.keys.KeyVaultKey
+ :raises: :class:`~azure.core.exceptions.HttpResponseError`
+ """
+ bundle = self._client.rotate_key(vault_base_url=self._vault_url, key_name=name, **kwargs)
+ return KeyVaultKey._from_key_bundle(bundle)
+
+ @distributed_trace
+ def update_key_rotation_policy(self, name, **kwargs):
+ # type: (str, **Any) -> KeyRotationPolicy
+ """Updates the rotation policy of a Key Vault key.
+
+ This operation requires the keys/update permission.
+
+ :param str name: The name of the key in the given vault.
+
+ :keyword lifetime_actions: Actions that will be performed by Key Vault over the lifetime of a key.
+ :paramtype lifetime_actions: Iterable[~azure.keyvault.keys.KeyRotationLifetimeAction]
+ :keyword str expires_in: The expiry time of the policy that will be applied on new key versions, defined as an
+ ISO 8601 duration. For example: 90 days is "P90D", 3 months is "P3M", and 48 hours is "PT48H".
+
+ :return: The updated rotation policy.
+ :rtype: ~azure.keyvault.keys.KeyRotationPolicy
+ :raises: :class:`~azure.core.exceptions.HttpResponseError`
+ """
+ lifetime_actions = kwargs.pop("lifetime_actions", None)
+ if lifetime_actions:
+ lifetime_actions = [
+ self._models.LifetimeActions(
+ action=self._models.LifetimeActionsType(type=action.action),
+ trigger=self._models.LifetimeActionsTrigger(
+ time_after_create=action.time_after_create, time_before_expiry=action.time_before_expiry
+ ),
+ )
+ for action in lifetime_actions
+ ]
+
+ attributes = self._models.KeyRotationPolicyAttributes(expiry_time=kwargs.pop("expires_in", None))
+ policy = self._models.KeyRotationPolicy(lifetime_actions=lifetime_actions, attributes=attributes)
+ result = self._client.update_key_rotation_policy(
+            vault_base_url=self._vault_url, key_name=name, key_rotation_policy=policy, **kwargs
+ )
+ return KeyRotationPolicy._from_generated(result)
diff --git a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_enums.py b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_enums.py
index b75eaaa99eb6..ac2667ae0482 100644
--- a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_enums.py
+++ b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_enums.py
@@ -39,6 +39,13 @@ class KeyOperation(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
export = "export"
+class KeyRotationPolicyAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
+ """The action that will be executed in a key rotation policy"""
+
+ ROTATE = "Rotate" #: Rotate the key based on the key policy.
+ NOTIFY = "Notify" #: Trigger Event Grid events.
+
+
class KeyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Supported key types"""
diff --git a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_models.py b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_models.py
index 6a127ced8283..c2aa78c6fe29 100644
--- a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_models.py
+++ b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_models.py
@@ -16,7 +16,7 @@
from typing import Any, Dict, Optional, List
from datetime import datetime
from ._generated.v7_0 import models as _models
- from ._enums import KeyOperation, KeyType
+ from ._enums import KeyOperation, KeyRotationPolicyAction, KeyType
KeyOperationResult = namedtuple("KeyOperationResult", ["id", "value"])
@@ -279,6 +279,71 @@ def __init__(self, value):
self.value = value
+class KeyRotationLifetimeAction(object):
+ """An action and its corresponding trigger that will be performed by Key Vault over the lifetime of a key.
+
+ :param action: The action that will be executed.
+ :type action: ~azure.keyvault.keys.KeyRotationPolicyAction or str
+
+ :keyword str time_after_create: Time after creation to attempt the specified action, as an ISO 8601 duration.
+ For example, 90 days is "P90D".
+ :keyword str time_before_expiry: Time before expiry to attempt the specified action, as an ISO 8601 duration.
+ For example, 90 days is "P90D".
+ """
+
+ def __init__(self, action, **kwargs):
+ # type: (KeyRotationPolicyAction, **Any) -> None
+ self.action = action
+ self.time_after_create = kwargs.get("time_after_create", None)
+ self.time_before_expiry = kwargs.get("time_before_expiry", None)
+
+ @classmethod
+ def _from_generated(cls, lifetime_action):
+ if lifetime_action.trigger:
+ return cls(
+ action=lifetime_action.action.type,
+ time_after_create=lifetime_action.trigger.time_after_create,
+ time_before_expiry=lifetime_action.trigger.time_before_expiry,
+ )
+ return cls(action=lifetime_action.action)
+
+
+class KeyRotationPolicy(object):
+ """The key rotation policy that belongs to a key.
+
+ :ivar str id: The identifier of the key rotation policy.
+    :ivar lifetime_actions: Actions that will be performed by Key Vault over the lifetime of a key.
+    :vartype lifetime_actions: list[~azure.keyvault.keys.KeyRotationLifetimeAction]
+    :ivar str expires_in: The expiry time of the policy that will be applied on new key versions, defined as an ISO
+        8601 duration. For example, 90 days is "P90D".
+    :ivar created_on: When the policy was created, in UTC
+    :vartype created_on: ~datetime.datetime
+    :ivar updated_on: When the policy was last updated, in UTC
+    :vartype updated_on: ~datetime.datetime
+ """
+
+ def __init__(self, policy_id, **kwargs):
+ # type: (str, **Any) -> None
+ self.id = policy_id
+ self.lifetime_actions = kwargs.get("lifetime_actions", None)
+ self.expires_in = kwargs.get("expires_in", None)
+ self.created_on = kwargs.get("created_on", None)
+ self.updated_on = kwargs.get("updated_on", None)
+
+ @classmethod
+ def _from_generated(cls, policy):
+ lifetime_actions = [KeyRotationLifetimeAction._from_generated(action) for action in policy.lifetime_actions] # pylint:disable=protected-access
+ if policy.attributes:
+ return cls(
+ policy_id=policy.id,
+ lifetime_actions=lifetime_actions,
+                expires_in=policy.attributes.expiry_time,
+ created_on=policy.attributes.created,
+ updated_on=policy.attributes.updated,
+ )
+ return cls(policy_id=policy.id, lifetime_actions=lifetime_actions)
+
+
class KeyVaultKey(object):
"""A key's attributes and cryptographic material.
diff --git a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/aio/_client.py b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/aio/_client.py
index 496844d1aa33..b1e835df7699 100644
--- a/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/aio/_client.py
+++ b/sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/aio/_client.py
@@ -11,12 +11,20 @@
from .._shared._polling_async import AsyncDeleteRecoverPollingMethod
from .._shared import AsyncKeyVaultClientBase
from .._shared.exceptions import error_map as _error_map
-from .. import DeletedKey, JsonWebKey, KeyProperties, KeyVaultKey, RandomBytes, ReleaseKeyResult
+from .. import (
+ DeletedKey,
+ JsonWebKey,
+ KeyProperties,
+ KeyRotationPolicy,
+ KeyVaultKey,
+ RandomBytes,
+ ReleaseKeyResult,
+)
if TYPE_CHECKING:
# pylint:disable=ungrouped-imports
from azure.core.async_paging import AsyncItemPaged
- from typing import Any, Optional, Union
+ from typing import Any, Iterable, Optional, Union
from .. import KeyType
@@ -42,7 +50,7 @@ class KeyClient(AsyncKeyVaultClientBase):
:dedent: 4
"""
- # pylint:disable=protected-access
+ # pylint:disable=protected-access, too-many-public-methods
def _get_attributes(self, enabled, not_before, expires_on, exportable=None):
"""Return a KeyAttributes object if none-None attributes are provided, or None otherwise"""
@@ -702,3 +710,67 @@ async def get_random_bytes(self, count: int, **kwargs: "Any") -> RandomBytes:
parameters = self._models.GetRandomBytesRequest(count=count)
result = await self._client.get_random_bytes(vault_base_url=self._vault_url, parameters=parameters, **kwargs)
return RandomBytes(value=result.value)
+
+ @distributed_trace_async
+ async def get_key_rotation_policy(self, name: str, **kwargs: "Any") -> "KeyRotationPolicy":
+ """Get the rotation policy of a Key Vault key.
+
+ :param str name: The name of the key.
+
+ :return: The key rotation policy.
+ :rtype: ~azure.keyvault.keys.KeyRotationPolicy
+ :raises: :class:`~azure.core.exceptions.HttpResponseError`
+ """
+ policy = await self._client.get_key_rotation_policy(vault_base_url=self._vault_url, key_name=name, **kwargs)
+ return KeyRotationPolicy._from_generated(policy)
+
+ @distributed_trace_async
+ async def rotate_key(self, name: str, **kwargs: "Any") -> KeyVaultKey:
+ """Rotate the key based on the key policy by generating a new version of the key.
+
+ This operation requires the keys/rotate permission.
+
+ :param str name: The name of the key to rotate.
+
+ :return: The new version of the rotated key.
+ :rtype: ~azure.keyvault.keys.KeyVaultKey
+ :raises: :class:`~azure.core.exceptions.HttpResponseError`
+ """
+ bundle = await self._client.rotate_key(vault_base_url=self._vault_url, key_name=name, **kwargs)
+ return KeyVaultKey._from_key_bundle(bundle)
+
+ @distributed_trace_async
+ async def update_key_rotation_policy(self, name: str, **kwargs: "Any") -> KeyRotationPolicy:
+ """Updates the rotation policy of a Key Vault key.
+
+ This operation requires the keys/update permission.
+
+ :param str name: The name of the key in the given vault.
+
+ :keyword lifetime_actions: Actions that will be performed by Key Vault over the lifetime of a key.
+ :paramtype lifetime_actions: Iterable[~azure.keyvault.keys.KeyRotationLifetimeAction]
+ :keyword str expires_in: The expiry time of the policy that will be applied on new key versions, defined as an
+ ISO 8601 duration. For example: 90 days is "P90D", 3 months is "P3M", and 48 hours is "PT48H".
+
+ :return: The updated rotation policy.
+ :rtype: ~azure.keyvault.keys.KeyRotationPolicy
+ :raises: :class:`~azure.core.exceptions.HttpResponseError`
+ """
+ lifetime_actions = kwargs.pop("lifetime_actions", None)
+ if lifetime_actions:
+ lifetime_actions = [
+ self._models.LifetimeActions(
+ action=self._models.LifetimeActionsType(type=action.action),
+ trigger=self._models.LifetimeActionsTrigger(
+ time_after_create=action.time_after_create, time_before_expiry=action.time_before_expiry
+ ),
+ )
+ for action in lifetime_actions
+ ]
+
+ attributes = self._models.KeyRotationPolicyAttributes(expiry_time=kwargs.pop("expires_in", None))
+ policy = self._models.KeyRotationPolicy(lifetime_actions=lifetime_actions, attributes=attributes)
+ result = await self._client.update_key_rotation_policy(
+            vault_base_url=self._vault_url, key_name=name, key_rotation_policy=policy, **kwargs
+ )
+ return KeyRotationPolicy._from_generated(result)
diff --git a/sdk/keyvault/azure-keyvault-keys/setup.py b/sdk/keyvault/azure-keyvault-keys/setup.py
index 5eb6f5522963..3a12c3d53b5d 100644
--- a/sdk/keyvault/azure-keyvault-keys/setup.py
+++ b/sdk/keyvault/azure-keyvault-keys/setup.py
@@ -68,6 +68,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
diff --git a/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_key_client.test_key_rotation_7_3_preview_vault.yaml b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_key_client.test_key_rotation_7_3_preview_vault.yaml
new file mode 100644
index 000000000000..bfb85abbeb9a
--- /dev/null
+++ b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_key_client.test_key_rotation_7_3_preview_vault.yaml
@@ -0,0 +1,148 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-key1cff14c0/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"error":{"code":"Unauthorized","message":"AKV10000: Request is missing
+ a Bearer or PoP token."}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '97'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 00:38:10 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ www-authenticate:
+ - Bearer authorization="https://login.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47",
+ resource="https://vault.azure.net"
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 401
+ message: Unauthorized
+- request:
+ body: '{"kty": "RSA"}'
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '14'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-key1cff14c0/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"key":{"kid":"https://vaultname.vault.azure.net/keys/livekvtestrotation-key1cff14c0/8d038a3884a24dc997ef9d529debf31a","kty":"RSA","key_ops":["encrypt","decrypt","sign","verify","wrapKey","unwrapKey"],"n":"1aJXI-PznJu_7RfGmgUSvqRptbKTYSWOpEDJQShmUHOy8ri9TlC0r6vZ0Ek21dj6gpWpnxu91-0-IVOaLn4RRBl74OlcRiSGzzbXdVaRwTS9-GAbq7DfXKVP0ZSm37fexkPtrqk94uOqsybGbjUgJ2CF645b7yJAfcpqiQmx4i7NTriie1rH4CzXq5YaecOPw9fwZEh4Wqup6gl2mdAGAjZW72LCWsIvAccJLp2DQ4ZBJ-qMltWwKnMcDPMDRtOemo6FaZW41BKqCQy-e0fdHIWhyWp0GkqylJ3OumWAg50P3PjevWT_rltPovwcvOy4upvS4IEkKDJHVJFujE32fQ","e":"AQAB"},"attributes":{"enabled":true,"created":1632789491,"updated":1632789491,"recoveryLevel":"Recoverable+Purgeable","recoverableDays":90}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '698'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 00:38:10 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-key1cff14c0/rotate?api-version=7.3-preview
+ response:
+ body:
+ string: '{"key":{"kid":"https://vaultname.vault.azure.net/keys/livekvtestrotation-key1cff14c0/7d2cb7c5012d41bd87ff36c5fa845cfc","kty":"RSA","key_ops":["encrypt","decrypt","sign","verify","wrapKey","unwrapKey"],"n":"wB5MHXYWo2l-8yOrk4bjheXi95VicMvJpSDInALbtMd6V9IzanKUKHoFD7sUrx5vBeApR07Vww9WPPWr-wAMQ8d6z7-qby7xF5SCWCZNWCXmKotUI9WH-ExRmZGzWCkbvsDE4fEh-uDLqh55-tGZSJMJRkc9DDl97_kdJyPILbwoQjQrAHghQJSC9JonWOS70GfTBKoIdgJrxKimHi61OnGDEIqnCuUbQmv9C3P_bNYMpV53F8M4Yxtcjmv_epiEh33rWtPXm-0S_kLbO3Ln0EoM09CaW1h5d3OdR9qy7r1YycpIaBHJHoF9LAbCUUTO8zUEDCKJglkfCmtRrZnL9Q","e":"AQAB"},"attributes":{"enabled":true,"created":1632789491,"updated":1632789491,"recoveryLevel":"Recoverable+Purgeable","recoverableDays":90}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '698'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 00:38:11 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_key_client.test_key_rotation_policy_7_3_preview_vault.yaml b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_key_client.test_key_rotation_policy_7_3_preview_vault.yaml
new file mode 100644
index 000000000000..5c04af45b520
--- /dev/null
+++ b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_key_client.test_key_rotation_policy_7_3_preview_vault.yaml
@@ -0,0 +1,288 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"error":{"code":"Unauthorized","message":"AKV10000: Request is missing
+ a Bearer or PoP token."}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '97'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 20:28:54 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ www-authenticate:
+ - Bearer authorization="https://login.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47",
+ resource="https://vault.azure.net"
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 401
+ message: Unauthorized
+- request:
+ body: '{"kty": "RSA"}'
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '14'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"key":{"kid":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/1f1dcc214e0649298fc9d8431f133d70","kty":"RSA","key_ops":["encrypt","decrypt","sign","verify","wrapKey","unwrapKey"],"n":"xuqdP_vk8J3YrTpuVGWdaqKyYur6DxjrNIf8ytP5gbe5FNZ6gWdR3Owy8DhQoR699a9XLLzuqa-h7DasW_n-HXoZ_yRcJq4SHi-FG4PkZdJB9rh54l8dbzFn-8anGdTcVEPqFn9B1xOdf8aSZ1QIyW2io9gpLtpWY9rvQjtK8N332KWLxDHDxgOIq7jbexsa9DDWgKzcA_g37PEwpRYSb4NtJkvk4ztMa_OhEEwL7HIzUJjCQjKZRRGIVZxNWhakMQrdb4qhxQ8PNPmMFcuY9TVJ8hzVCZHQrsristcBcE1IyhvQoHYnCnDX3dD5FLyhPAoLVqmP6Z0_2w68-k-nfQ","e":"AQAB"},"attributes":{"enabled":true,"exp":1640636935,"created":1632860935,"updated":1632860935,"recoveryLevel":"Recoverable+Purgeable","recoverableDays":90}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '715'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 20:28:55 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 200
+ message: OK
+- request:
+ body: '{"lifetimeActions": [{"trigger": {"timeBeforeExpiry": "P30D"}, "action":
+ {"type": "Rotate"}}], "attributes": {"expiryTime": "P90D"}}'
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '132'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: PUT
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy","lifetimeActions":[{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Rotate"}},{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Notify"}}],"attributes":{"expiryTime":"P90D","created":1632795088,"updated":1632795088}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '327'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 20:28:55 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: GET
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy","lifetimeActions":[{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Rotate"}},{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Notify"}}],"attributes":{"expiryTime":"P90D","created":1632795088,"updated":1632795088}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '327'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 20:28:55 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 200
+ message: OK
+- request:
+ body: '{"lifetimeActions": [{"trigger": {"timeAfterCreate": "P2M"}, "action":
+ {"type": "Notify"}}], "attributes": {}}'
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '110'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: PUT
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy","lifetimeActions":[{"trigger":{"timeAfterCreate":"P2M"},"action":{"type":"Notify"}}],"attributes":{"created":1632795088,"updated":1632860936}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '238'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 20:28:55 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: GET
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keybd0f17af/rotationpolicy","lifetimeActions":[{"trigger":{"timeAfterCreate":"P2M"},"action":{"type":"Notify"}}],"attributes":{"created":1632795088,"updated":1632860936}}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '238'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Tue, 28 Sep 2021 20:28:56 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ strict-transport-security:
+ - max-age=31536000;includeSubDomains
+ x-content-type-options:
+ - nosniff
+ x-ms-keyvault-network-info:
+ - conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region:
+ - westus
+ x-ms-keyvault-service-version:
+ - 1.9.79.2
+ x-powered-by:
+ - ASP.NET
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_keys_async.test_key_rotation_7_3_preview_vault.yaml b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_keys_async.test_key_rotation_7_3_preview_vault.yaml
new file mode 100644
index 000000000000..5c5d154bff36
--- /dev/null
+++ b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_keys_async.test_key_rotation_7_3_preview_vault.yaml
@@ -0,0 +1,100 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '0'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-key201714d2/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"error":{"code":"Unauthorized","message":"AKV10000: Request is missing
+ a Bearer or PoP token."}}'
+ headers:
+ cache-control: no-cache
+ content-length: '97'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 02:39:57 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ www-authenticate: Bearer authorization="https://login.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47",
+ resource="https://vault.azure.net"
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 401
+ message: Unauthorized
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-key201714d2/create?api-version=7.3-preview
+- request:
+ body: '{"kty": "RSA"}'
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '14'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-key201714d2/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"key":{"kid":"https://vaultname.vault.azure.net/keys/livekvtestrotation-key201714d2/efb9c8e922a647e0bac7c713c3266428","kty":"RSA","key_ops":["encrypt","decrypt","sign","verify","wrapKey","unwrapKey"],"n":"1vbwKr3d5P_CSOJfLHp0VWLlqeptK5NIh7raFt4aCuG2npczGgNKznjdFeUt3MborSWYyEY4DlZWGJsvA7TOAQ13k9-u9xe2g1wYfgNw5X8WAju2BO69ylBQ7aPEZeoEfO12ub0a3uvA0Ooct8OC-nyalyk7qZON8xJoj0Todagwq-njgwuZask3XJVAx-yGobwFcFQgynrq5ScRcHTJSKGnTp7FniJqAg6tD6nTUWq_RlmgQ6f5cok0AAvChauDzes4T-3glcMucqSJohAGyqXkcETiBOfZH5keVmFdi5n-9C6dRFq3OjJltl-JhVAI6nL18chVrZxF6C0qApcBVQ","e":"AQAB"},"attributes":{"enabled":true,"created":1632796798,"updated":1632796798,"recoveryLevel":"Recoverable+Purgeable","recoverableDays":90}}'
+ headers:
+ cache-control: no-cache
+ content-length: '698'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 02:39:58 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 200
+ message: OK
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-key201714d2/create?api-version=7.3-preview
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-key201714d2/rotate?api-version=7.3-preview
+ response:
+ body:
+ string: '{"key":{"kid":"https://vaultname.vault.azure.net/keys/livekvtestrotation-key201714d2/d91392d890cb485fa99f84391f7d845b","kty":"RSA","key_ops":["encrypt","decrypt","sign","verify","wrapKey","unwrapKey"],"n":"4SWEheIFMLmxTdx92NaXipleHUHoTuvqarTdQ4fhtAPzHtRdGKTjbCFjT-c21T7x0gkDhPJsCMzO9VjuLKvdBiRziZarxEU_1Sv9NGoWBRFdLbFGh5ZV1eDklQxXQuIxgIsz177fzI--ErbIhA1zVfpOuUbart1Kj99VZsqv8PhkwN065SfNrOrVtGmW1rhBZUA6-l7wVY10iTCgCfPww36LrTE66soNNt6rRk03_Xmi_08va3jzmg89MQ3UZL94W-dDXVU25vmNV4MIhclPxB15WwSEstLrJqvlxNzAbTwt9akzaWHaihVuK_Ug3sTl-ye232jdnpqdWIt8VgojEQ","e":"AQAB"},"attributes":{"enabled":true,"created":1632796798,"updated":1632796798,"recoveryLevel":"Recoverable+Purgeable","recoverableDays":90}}'
+ headers:
+ cache-control: no-cache
+ content-length: '698'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 02:39:58 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 200
+ message: OK
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-key201714d2/rotate?api-version=7.3-preview
+version: 1
diff --git a/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_keys_async.test_key_rotation_policy_7_3_preview_vault.yaml b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_keys_async.test_key_rotation_policy_7_3_preview_vault.yaml
new file mode 100644
index 000000000000..343b4a0cc575
--- /dev/null
+++ b/sdk/keyvault/azure-keyvault-keys/tests/recordings/test_keys_async.test_key_rotation_policy_7_3_preview_vault.yaml
@@ -0,0 +1,197 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '0'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"error":{"code":"Unauthorized","message":"AKV10000: Request is missing
+ a Bearer or PoP token."}}'
+ headers:
+ cache-control: no-cache
+ content-length: '97'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 20:30:13 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ www-authenticate: Bearer authorization="https://login.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47",
+ resource="https://vault.azure.net"
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 401
+ message: Unauthorized
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/create?api-version=7.3-preview
+- request:
+ body: '{"kty": "RSA"}'
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '14'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/create?api-version=7.3-preview
+ response:
+ body:
+ string: '{"key":{"kid":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/d576940c117f4f3cb9ddb667c1042a8d","kty":"RSA","key_ops":["encrypt","decrypt","sign","verify","wrapKey","unwrapKey"],"n":"tLipS2sqtYKME4ffnq8-tGTxE8vCKp0zfIWclCIrorV-I1MzWsZzNUyyVfjoKvk2h1CKTVBar8d00eCvvPRHeSd-YalDTobAfmLM58xej3MT27f5p7t_sfkK3ZP5BoGssbKHXR_7RI2vvNb_VirqzF1leJKRu-FMs0xdjBfqaiqmyWQZFSLXEPoAkE3BQ42BsIrXhQFQVixoNMaiY9_LOb2zl2_MHhvKRw2ZO9DEIJ33IWFcH5crr2UklJxkAOmCVNKfRbZkxjPeGJKUxypoyVer7kWUNdleDPhupk6RWnGta9npIxSsB0DjQSrXf8G_McNB2Q4SBymfXrJRO8RREQ","e":"AQAB"},"attributes":{"enabled":true,"exp":1640637014,"created":1632861014,"updated":1632861014,"recoveryLevel":"Recoverable+Purgeable","recoverableDays":90}}'
+ headers:
+ cache-control: no-cache
+ content-length: '715'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 20:30:14 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 200
+ message: OK
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/create?api-version=7.3-preview
+- request:
+ body: '{"lifetimeActions": [{"trigger": {"timeBeforeExpiry": "P30D"}, "action":
+ {"type": "Rotate"}}], "attributes": {"expiryTime": "P90D"}}'
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '132'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: PUT
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy","lifetimeActions":[{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Rotate"}},{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Notify"}}],"attributes":{"expiryTime":"P90D","created":1632796802,"updated":1632796802}}'
+ headers:
+ cache-control: no-cache
+ content-length: '327'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 20:30:14 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 200
+ message: OK
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: GET
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy","lifetimeActions":[{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Rotate"}},{"trigger":{"timeBeforeExpiry":"P30D"},"action":{"type":"Notify"}}],"attributes":{"expiryTime":"P90D","created":1632796802,"updated":1632796802}}'
+ headers:
+ cache-control: no-cache
+ content-length: '327'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 20:30:14 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 200
+ message: OK
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+- request:
+ body: '{"lifetimeActions": [{"trigger": {"timeAfterCreate": "P2M"}, "action":
+ {"type": "Notify"}}], "attributes": {}}'
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '110'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: PUT
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy","lifetimeActions":[{"trigger":{"timeAfterCreate":"P2M"},"action":{"type":"Notify"}}],"attributes":{"created":1632796802,"updated":1632861014}}'
+ headers:
+ cache-control: no-cache
+ content-length: '238'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 20:30:14 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 200
+ message: OK
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ User-Agent:
+ - azsdk-python-keyvault-keys/4.5.0b4 Python/3.9.0 (Windows-10-10.0.19041-SP0)
+ method: GET
+ uri: https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+ response:
+ body:
+ string: '{"id":"https://vaultname.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy","lifetimeActions":[{"trigger":{"timeAfterCreate":"P2M"},"action":{"type":"Notify"}}],"attributes":{"created":1632796802,"updated":1632861014}}'
+ headers:
+ cache-control: no-cache
+ content-length: '238'
+ content-type: application/json; charset=utf-8
+ date: Tue, 28 Sep 2021 20:30:14 GMT
+ expires: '-1'
+ pragma: no-cache
+ strict-transport-security: max-age=31536000;includeSubDomains
+ x-content-type-options: nosniff
+ x-ms-keyvault-network-info: conn_type=Ipv4;addr=172.92.159.124;act_addr_fam=InterNetwork;
+ x-ms-keyvault-region: westus
+ x-ms-keyvault-service-version: 1.9.79.2
+ x-powered-by: ASP.NET
+ status:
+ code: 200
+ message: OK
+ url: https://mcpatinotest.vault.azure.net/keys/livekvtestrotation-keyc0a517c1/rotationpolicy?api-version=7.3-preview
+version: 1
diff --git a/sdk/keyvault/azure-keyvault-keys/tests/test_key_client.py b/sdk/keyvault/azure-keyvault-keys/tests/test_key_client.py
index 1609ededad21..e3ba694fd382 100644
--- a/sdk/keyvault/azure-keyvault-keys/tests/test_key_client.py
+++ b/sdk/keyvault/azure-keyvault-keys/tests/test_key_client.py
@@ -11,7 +11,14 @@
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
from azure.core.pipeline.policies import SansIOHTTPPolicy
-from azure.keyvault.keys import ApiVersion, JsonWebKey, KeyClient, KeyReleasePolicy
+from azure.keyvault.keys import (
+ ApiVersion,
+ JsonWebKey,
+ KeyClient,
+ KeyReleasePolicy,
+ KeyRotationLifetimeAction,
+ KeyRotationPolicyAction,
+)
import pytest
from six import byte2int
@@ -22,10 +29,24 @@
all_api_versions = get_decorator()
only_hsm = get_decorator(only_hsm=True)
only_hsm_7_3_preview = get_decorator(only_hsm=True, api_versions=[ApiVersion.V7_3_PREVIEW])
+only_vault_7_3_preview = get_decorator(only_vault=True, api_versions=[ApiVersion.V7_3_PREVIEW])
logging_enabled = get_decorator(logging_enable=True)
logging_disabled = get_decorator(logging_enable=False)
+def _assert_rotation_policies_equal(p1, p2):
+ assert p1.id == p2.id
+ assert p1.expires_in == p2.expires_in
+ assert p1.created_on == p2.created_on
+ assert p1.updated_on == p2.updated_on
+ assert len(p1.lifetime_actions) == len(p2.lifetime_actions)
+
+def _assert_lifetime_actions_equal(a1, a2):
+ assert a1.action == a2.action
+ assert a1.time_after_create == a2.time_after_create
+ assert a1.time_before_expiry == a2.time_before_expiry
+
+
# used for logging tests
class MockHandler(logging.Handler):
def __init__(self):
@@ -508,6 +529,48 @@ def test_update_release_policy(self, client, **kwargs):
self._update_key_properties(client, key, new_release_policy)
+ @only_vault_7_3_preview()
+ @client_setup
+ def test_key_rotation(self, client, **kwargs):
+ key_name = self.get_resource_name("rotation-key")
+ key = self._create_rsa_key(client, key_name)
+ rotated_key = client.rotate_key(key_name)
+
+ # the rotated key should have a new ID, version, and key material (for RSA, n and e fields)
+ assert key.id != rotated_key.id
+ assert key.properties.version != rotated_key.properties.version
+ assert key.key.n != rotated_key.key.n
+
+ @only_vault_7_3_preview()
+ @client_setup
+ def test_key_rotation_policy(self, client, **kwargs):
+ key_name = self.get_resource_name("rotation-key")
+ self._create_rsa_key(client, key_name)
+
+ actions = [KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE, time_before_expiry="P30D")]
+ updated_policy = client.update_key_rotation_policy(key_name, expires_in="P90D", lifetime_actions=actions)
+ fetched_policy = client.get_key_rotation_policy(key_name)
+ _assert_rotation_policies_equal(updated_policy, fetched_policy)
+
+ updated_policy_actions = updated_policy.lifetime_actions[0]
+ fetched_policy_actions = fetched_policy.lifetime_actions[0]
+ assert updated_policy_actions.action == KeyRotationPolicyAction.ROTATE
+ assert updated_policy_actions.time_after_create is None
+ assert updated_policy_actions.time_before_expiry == "P30D"
+ _assert_lifetime_actions_equal(updated_policy_actions, fetched_policy_actions)
+
+ new_actions = [KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY, time_after_create="P2M")]
+ new_policy = client.update_key_rotation_policy(key_name, lifetime_actions=new_actions)
+ new_fetched_policy = client.get_key_rotation_policy(key_name)
+ _assert_rotation_policies_equal(new_policy, new_fetched_policy)
+
+ new_policy_actions = new_policy.lifetime_actions[0]
+ new_fetched_policy_actions = new_fetched_policy.lifetime_actions[0]
+ assert new_policy_actions.action == KeyRotationPolicyAction.NOTIFY
+ assert new_policy_actions.time_after_create == "P2M"
+ assert new_policy_actions.time_before_expiry is None
+ _assert_lifetime_actions_equal(new_policy_actions, new_fetched_policy_actions)
+
def test_positive_bytes_count_required():
client = KeyClient("...", object())
diff --git a/sdk/keyvault/azure-keyvault-keys/tests/test_keys_async.py b/sdk/keyvault/azure-keyvault-keys/tests/test_keys_async.py
index 17a2604bd223..524ebb062c81 100644
--- a/sdk/keyvault/azure-keyvault-keys/tests/test_keys_async.py
+++ b/sdk/keyvault/azure-keyvault-keys/tests/test_keys_async.py
@@ -11,18 +11,26 @@
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
from azure.core.pipeline.policies import SansIOHTTPPolicy
-from azure.keyvault.keys import ApiVersion, JsonWebKey, KeyReleasePolicy
+from azure.keyvault.keys import (
+ ApiVersion,
+ JsonWebKey,
+ KeyReleasePolicy,
+ KeyRotationLifetimeAction,
+ KeyRotationPolicyAction,
+)
from azure.keyvault.keys.aio import KeyClient
import pytest
from six import byte2int
from _shared.test_case_async import KeyVaultTestCase
from _test_case import client_setup, get_attestation_token, get_decorator, get_release_policy, KeysTestCase
+from test_key_client import _assert_lifetime_actions_equal, _assert_rotation_policies_equal
all_api_versions = get_decorator(is_async=True)
only_hsm = get_decorator(only_hsm=True, is_async=True)
only_hsm_7_3_preview = get_decorator(only_hsm=True, is_async=True, api_versions=[ApiVersion.V7_3_PREVIEW])
+only_vault_7_3_preview = get_decorator(only_vault=True, is_async=True, api_versions=[ApiVersion.V7_3_PREVIEW])
logging_enabled = get_decorator(is_async=True, logging_enable=True)
logging_disabled = get_decorator(is_async=True, logging_enable=False)
@@ -538,6 +546,48 @@ async def test_update_release_policy(self, client, **kwargs):
await self._update_key_properties(client, key, new_release_policy)
+ @only_vault_7_3_preview()
+ @client_setup
+ async def test_key_rotation(self, client, **kwargs):
+ key_name = self.get_resource_name("rotation-key")
+ key = await self._create_rsa_key(client, key_name)
+ rotated_key = await client.rotate_key(key_name)
+
+ # the rotated key should have a new ID, version, and key material (for RSA, n and e fields)
+ assert key.id != rotated_key.id
+ assert key.properties.version != rotated_key.properties.version
+ assert key.key.n != rotated_key.key.n
+
+ @only_vault_7_3_preview()
+ @client_setup
+ async def test_key_rotation_policy(self, client, **kwargs):
+ key_name = self.get_resource_name("rotation-key")
+ await self._create_rsa_key(client, key_name)
+
+ actions = [KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE, time_before_expiry="P30D")]
+ updated_policy = await client.update_key_rotation_policy(key_name, expires_in="P90D", lifetime_actions=actions)
+ fetched_policy = await client.get_key_rotation_policy(key_name)
+ _assert_rotation_policies_equal(updated_policy, fetched_policy)
+
+ updated_policy_actions = updated_policy.lifetime_actions[0]
+ fetched_policy_actions = fetched_policy.lifetime_actions[0]
+ assert updated_policy_actions.action == KeyRotationPolicyAction.ROTATE
+ assert updated_policy_actions.time_after_create is None
+ assert updated_policy_actions.time_before_expiry == "P30D"
+ _assert_lifetime_actions_equal(updated_policy_actions, fetched_policy_actions)
+
+ new_actions = [KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY, time_after_create="P2M")]
+ new_policy = await client.update_key_rotation_policy(key_name, lifetime_actions=new_actions)
+ new_fetched_policy = await client.get_key_rotation_policy(key_name)
+ _assert_rotation_policies_equal(new_policy, new_fetched_policy)
+
+ new_policy_actions = new_policy.lifetime_actions[0]
+ new_fetched_policy_actions = new_fetched_policy.lifetime_actions[0]
+ assert new_policy_actions.action == KeyRotationPolicyAction.NOTIFY
+ assert new_policy_actions.time_after_create == "P2M"
+ assert new_policy_actions.time_before_expiry is None
+ _assert_lifetime_actions_equal(new_policy_actions, new_fetched_policy_actions)
+
@pytest.mark.asyncio
async def test_positive_bytes_count_required():
diff --git a/sdk/keyvault/azure-keyvault-secrets/README.md b/sdk/keyvault/azure-keyvault-secrets/README.md
index 0ec1bcc3e8d6..13142a68ab23 100644
--- a/sdk/keyvault/azure-keyvault-secrets/README.md
+++ b/sdk/keyvault/azure-keyvault-secrets/README.md
@@ -14,6 +14,10 @@ create, manage, and deploy public and private SSL/TLS certificates
[Source code][secret_client_src] | [Package (PyPI)][pypi_package_secrets] | [API reference documentation][reference_docs] | [Product documentation][keyvault_docs] | [Samples][secret_samples]
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Install packages
Install [azure-keyvault-secrets][pypi_package_secrets] and
diff --git a/sdk/keyvault/azure-keyvault-secrets/setup.py b/sdk/keyvault/azure-keyvault-secrets/setup.py
index bdcb5e2c6f04..b8e377b75496 100644
--- a/sdk/keyvault/azure-keyvault-secrets/setup.py
+++ b/sdk/keyvault/azure-keyvault-secrets/setup.py
@@ -69,6 +69,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
diff --git a/sdk/keyvault/test-resources.json b/sdk/keyvault/test-resources.json
index cadd04d8b6d5..9268e5237874 100644
--- a/sdk/keyvault/test-resources.json
+++ b/sdk/keyvault/test-resources.json
@@ -89,6 +89,7 @@
}
},
"variables": {
+ "kvApiVersion": "2019-09-01",
"azureKeyVaultUrl": "[format('https://{0}{1}', parameters('baseName'), parameters('keyVaultDomainSuffix'))]",
"hsmApiVersion": "2021-04-01-preview",
"hsmName": "[concat(parameters('baseName'), 'hsm')]",
@@ -113,7 +114,7 @@
"resources": [
{
"type": "Microsoft.KeyVault/vaults",
- "apiVersion": "2016-10-01",
+ "apiVersion": "[variables('kvApiVersion')]",
"name": "[parameters('baseName')]",
"location": "[parameters('location')]",
"properties": {
@@ -143,7 +144,8 @@
"wrapKey",
"verify",
"sign",
- "purge"
+ "purge",
+ "rotate"
],
"secrets": [
"get",
diff --git a/sdk/metricsadvisor/azure-ai-metricsadvisor/README.md b/sdk/metricsadvisor/azure-ai-metricsadvisor/README.md
index b60a6eb58487..65030e7efb4d 100644
--- a/sdk/metricsadvisor/azure-ai-metricsadvisor/README.md
+++ b/sdk/metricsadvisor/azure-ai-metricsadvisor/README.md
@@ -8,6 +8,10 @@ Metrics Advisor is a scalable real-time time series monitoring, alerting, and ro
[Source code][src_code] | [Package (Pypi)][package] | [API reference documentation][reference_documentation] | [Product documentation][ma_docs] | [Samples][samples_readme]
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Install the package
diff --git a/sdk/metricsadvisor/azure-ai-metricsadvisor/setup.py b/sdk/metricsadvisor/azure-ai-metricsadvisor/setup.py
index 76912bfddf36..117821428f55 100644
--- a/sdk/metricsadvisor/azure-ai-metricsadvisor/setup.py
+++ b/sdk/metricsadvisor/azure-ai-metricsadvisor/setup.py
@@ -69,6 +69,7 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md b/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md
index 556d3532b5aa..eff8b60cc8d3 100644
--- a/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md
+++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/CHANGELOG.md
@@ -1,12 +1,14 @@
# Release History
- **Features**
- - Support stamp specific redirect in exporters
- ([#20489](https://github.com/Azure/azure-sdk-for-python/pull/20489))
+## 1.0.0b5 (2021-10-05)
- **Breaking Changes**
- - Change exporter OT to AI mapping fields following common schema
- ([#20445](https://github.com/Azure/azure-sdk-for-python/pull/20445))
+### Features Added
+- Support stamp specific redirect in exporters
+ ([#20489](https://github.com/Azure/azure-sdk-for-python/pull/20489))
+
+### Breaking Changes
+- Change exporter OT to AI mapping fields following common schema
+ ([#20445](https://github.com/Azure/azure-sdk-for-python/pull/20445))
## 1.0.0b4 (2021-04-06)
diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/README.md b/sdk/monitor/azure-monitor-opentelemetry-exporter/README.md
index 7f3e594ccd44..38b856901f16 100644
--- a/sdk/monitor/azure-monitor-opentelemetry-exporter/README.md
+++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/README.md
@@ -21,7 +21,7 @@ To use this package, you must have:
* Azure subscription - [Create a free account][azure_sub]
* Azure Monitor - [How to use application insights][application_insights_namespace]
* Opentelemetry SDK - [Opentelemtry SDK for Python][ot_sdk_python]
-* Python 3.5 or later - [Install Python][python]
+* Python 3.6 or later - [Install Python][python]
### Instantiate the client
diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_version.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_version.py
index 3938d6c3e835..9ed953daa8fe 100644
--- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_version.py
+++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_version.py
@@ -5,4 +5,4 @@
# license information.
# --------------------------------------------------------------------------
-VERSION = "1.0.0b4"
+VERSION = "1.0.0b5"
diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py
index 732fe0caa160..73b1e714d5ae 100644
--- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py
+++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py
@@ -6,6 +6,8 @@
from typing import Sequence, Any
from urllib.parse import urlparse
+from opentelemetry.semconv.resource import ResourceAttributes
+from opentelemetry.semconv.trace import DbSystemValues, SpanAttributes
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from opentelemetry.sdk.util import ns_to_iso_str
from opentelemetry.trace import Span, SpanKind
@@ -91,9 +93,9 @@ def _convert_span_to_envelope(span: Span) -> TelemetryItem:
time=ns_to_iso_str(span.start_time),
)
if span.resource and span.resource.attributes:
- service_name = span.resource.attributes.get("service.name")
- service_namespace = span.resource.attributes.get("service.namespace")
- service_instance_id = span.resource.attributes.get("service.instance.id")
+ service_name = span.resource.attributes.get(ResourceAttributes.SERVICE_NAME)
+ service_namespace = span.resource.attributes.get(ResourceAttributes.SERVICE_NAMESPACE)
+ service_instance_id = span.resource.attributes.get(ResourceAttributes.SERVICE_INSTANCE_ID)
if service_name:
if service_namespace:
envelope.tags["ai.cloud.role"] = service_namespace + \
@@ -106,8 +108,8 @@ def _convert_span_to_envelope(span: Span) -> TelemetryItem:
envelope.tags["ai.cloud.roleInstance"] = platform.node() # hostname default
envelope.tags["ai.internal.nodeName"] = envelope.tags["ai.cloud.roleInstance"]
envelope.tags["ai.operation.id"] = "{:032x}".format(span.context.trace_id)
- if "enduser.id" in span.attributes:
- envelope.tags["ai.user.id"] = span.attributes["enduser.id"]
+ if SpanAttributes.ENDUSER_ID in span.attributes:
+ envelope.tags["ai.user.id"] = span.attributes[SpanAttributes.ENDUSER_ID]
if span.parent and span.parent.span_id:
envelope.tags["ai.operation.parentId"] = "{:016x}".format(
span.parent.span_id
@@ -116,7 +118,7 @@ def _convert_span_to_envelope(span: Span) -> TelemetryItem:
if span.kind in (SpanKind.CONSUMER, SpanKind.SERVER):
envelope.name = "Microsoft.ApplicationInsights.Request"
data = RequestData(
- name=span.name[:1024], # Breeze max length
+ name=span.name,
id="{:016x}".format(span.context.span_id),
duration=_utils.ns_to_duration(span.end_time - span.start_time),
response_code="0",
@@ -124,86 +126,105 @@ def _convert_span_to_envelope(span: Span) -> TelemetryItem:
properties={},
)
envelope.data = MonitorBase(base_data=data, base_type="RequestData")
- if "http.method" in span.attributes: # HTTP
- envelope.tags["ai.operation.name"] = "{} {}".format(
- span.attributes["http.method"],
- span.name,
- )
- data.properties["request.name"] = data.name
+ if SpanAttributes.HTTP_METHOD in span.attributes: # HTTP
url = ""
- if "http.user_agent" in span.attributes:
+ path = ""
+ if SpanAttributes.HTTP_USER_AGENT in span.attributes:
# TODO: Not exposed in Swagger, need to update def
- envelope.tags["ai.user.userAgent"] = span.attributes["http.user_agent"]
- if "http.client_ip" in span.attributes:
- envelope.tags["ai.location.ip"] = span.attributes["http.client_ip"]
- elif "net.peer.ip" in span.attributes:
- envelope.tags["ai.location.ip"] = span.attributes["net.peer.ip"]
+ envelope.tags["ai.user.userAgent"] = span.attributes[SpanAttributes.HTTP_USER_AGENT]
+ if SpanAttributes.HTTP_CLIENT_IP in span.attributes:
+ envelope.tags["ai.location.ip"] = span.attributes[SpanAttributes.HTTP_CLIENT_IP]
+ elif SpanAttributes.NET_PEER_IP in span.attributes:
+ envelope.tags["ai.location.ip"] = span.attributes[SpanAttributes.NET_PEER_IP]
# url
- if "http.url" in span.attributes:
- url = span.attributes["http.url"]
- elif "http.scheme" in span.attributes and "http.target" in span.attributes:
- scheme = span.attributes["http.scheme"]
- http_target = span.attributes["http.target"]
- if "http.host" in span.attributes:
+ if SpanAttributes.HTTP_URL in span.attributes:
+ url = span.attributes[SpanAttributes.HTTP_URL]
+ elif SpanAttributes.HTTP_SCHEME in span.attributes and SpanAttributes.HTTP_TARGET in span.attributes:
+ scheme = span.attributes[SpanAttributes.HTTP_SCHEME]
+ http_target = span.attributes[SpanAttributes.HTTP_TARGET]
+ if SpanAttributes.HTTP_HOST in span.attributes:
url = "{}://{}{}".format(
scheme,
- span.attributes["http.host"],
+ span.attributes[SpanAttributes.HTTP_HOST],
http_target,
)
- elif "net.host.port" in span.attributes:
- host_port = span.attributes["net.host.port"]
- if "http.server_name" in span.attributes:
- server_name = span.attributes["http.server_name"]
+ elif SpanAttributes.NET_HOST_PORT in span.attributes:
+ host_port = span.attributes[SpanAttributes.NET_HOST_PORT]
+ if SpanAttributes.HTTP_SERVER_NAME in span.attributes:
+ server_name = span.attributes[SpanAttributes.HTTP_SERVER_NAME]
url = "{}://{}:{}{}".format(
scheme,
server_name,
host_port,
http_target,
)
- elif "net.host.name" in span.attributes:
- host_name = span.attributes["net.host.name"]
+ elif SpanAttributes.NET_HOST_NAME in span.attributes:
+ host_name = span.attributes[SpanAttributes.NET_HOST_NAME]
url = "{}://{}:{}{}".format(
scheme,
host_name,
host_port,
http_target,
)
- if url:
- url = url[:2048] # Breeze max length
data.url = url
- data.properties["request.url"] = url
- if "http.status_code" in span.attributes:
- status_code = span.attributes["http.status_code"]
+ # Http specific logic for ai.operation.name
+ if SpanAttributes.HTTP_ROUTE in span.attributes:
+ envelope.tags["ai.operation.name"] = "{} {}".format(
+ span.attributes[SpanAttributes.HTTP_METHOD],
+ span.attributes[SpanAttributes.HTTP_ROUTE],
+ )
+ elif url:
+ try:
+ parse_url = urlparse(url)
+ path = parse_url.path
+ if not path:
+ path = "/"
+ envelope.tags["ai.operation.name"] = "{} {}".format(
+ span.attributes[SpanAttributes.HTTP_METHOD],
+ path,
+ )
+ except Exception: # pylint: disable=broad-except
+ pass
+ else:
+ envelope.tags["ai.operation.name"] = span.name
+ if SpanAttributes.HTTP_STATUS_CODE in span.attributes:
+ status_code = span.attributes[SpanAttributes.HTTP_STATUS_CODE]
data.response_code = str(status_code)
- elif "messaging.system" in span.attributes: # Messaging
+ elif SpanAttributes.MESSAGING_SYSTEM in span.attributes: # Messaging
envelope.tags["ai.operation.name"] = span.name
- if "net.peer.ip" in span.attributes:
- envelope.tags["ai.location.ip"] = span.attributes["net.peer.ip"]
- if "messaging.destination" in span.attributes:
- if "net.peer.name" in span.attributes:
+ if SpanAttributes.NET_PEER_IP in span.attributes:
+ envelope.tags["ai.location.ip"] = span.attributes[SpanAttributes.NET_PEER_IP]
+ if SpanAttributes.MESSAGING_DESTINATION in span.attributes:
+ if SpanAttributes.NET_PEER_NAME in span.attributes:
data.properties["source"] = "{}/{}".format(
- span.attributes["net.peer.name"],
- span.attributes["messaging.destination"],
+ span.attributes[SpanAttributes.NET_PEER_NAME],
+ span.attributes[SpanAttributes.MESSAGING_DESTINATION],
)
- elif "net.peer.ip" in span.attributes:
+ elif SpanAttributes.NET_PEER_IP in span.attributes:
data.properties["source"] = "{}/{}".format(
- span.attributes["net.peer.ip"],
- span.attributes["messaging.destination"],
+ span.attributes[SpanAttributes.NET_PEER_IP],
+ span.attributes[SpanAttributes.MESSAGING_DESTINATION],
)
else:
- data.properties["source"] = span.attributes["messaging.destination"]
+ data.properties["source"] = span.attributes[SpanAttributes.MESSAGING_DESTINATION]
else: # Other
envelope.tags["ai.operation.name"] = span.name
- if "net.peer.ip" in span.attributes:
- envelope.tags["ai.location.ip"] = span.attributes["net.peer.ip"]
- data.response_code = data.response_code[:1024] # Breeze max length
+ if SpanAttributes.NET_PEER_IP in span.attributes:
+ envelope.tags["ai.location.ip"] = span.attributes[SpanAttributes.NET_PEER_IP]
+ # Apply truncation
+ if data.url:
+ data.url = data.url[:2048] # Breeze max length
+ if data.response_code:
+ data.response_code = data.response_code[:1024] # Breeze max length
+ if envelope.tags["ai.operation.name"]:
+ data.name = envelope.tags["ai.operation.name"][:1024] # Breeze max length
else: # INTERNAL, CLIENT, PRODUCER
envelope.name = "Microsoft.ApplicationInsights.RemoteDependency"
# TODO: ai.operation.name for non-server spans
data = RemoteDependencyData(
- name=span.name[:1024], # Breeze max length
+ name=span.name,
id="{:016x}".format(span.context.span_id),
- result_code=str(span.status.status_code.value),
+ result_code="0",
duration=_utils.ns_to_duration(span.end_time - span.start_time),
success=span.status.is_ok,
properties={},
@@ -212,110 +233,123 @@ def _convert_span_to_envelope(span: Span) -> TelemetryItem:
base_data=data, base_type="RemoteDependencyData"
)
target = None
- if "peer.service" in span.attributes:
- target = span.attributes["peer.service"]
+ if SpanAttributes.PEER_SERVICE in span.attributes:
+ target = span.attributes[SpanAttributes.PEER_SERVICE]
else:
- if "net.peer.name" in span.attributes:
- target = span.attributes["net.peer.name"]
- elif "net.peer.ip" in span.attributes:
- target = span.attributes["net.peer.ip"]
- if "net.peer.port" in span.attributes:
- port = span.attributes["net.peer.port"]
+ if SpanAttributes.NET_PEER_NAME in span.attributes:
+ target = span.attributes[SpanAttributes.NET_PEER_NAME]
+ elif SpanAttributes.NET_PEER_IP in span.attributes:
+ target = span.attributes[SpanAttributes.NET_PEER_IP]
+ if SpanAttributes.NET_PEER_PORT in span.attributes:
+ port = span.attributes[SpanAttributes.NET_PEER_PORT]
# TODO: check default port for rpc
# This logic assumes default ports never conflict across dependency types
- if port != _get_default_port_http(span.attributes.get("http.scheme")) and \
- port != _get_default_port_db(span.attributes.get("db.system")):
+ if port != _get_default_port_http(span.attributes.get(SpanAttributes.HTTP_SCHEME)) and \
+ port != _get_default_port_db(span.attributes.get(SpanAttributes.DB_SYSTEM)):
target = "{}:{}".format(target, port)
if span.kind is SpanKind.CLIENT:
- if "http.method" in span.attributes: # HTTP
+ if SpanAttributes.HTTP_METHOD in span.attributes: # HTTP
data.type = "HTTP"
- if "http.user_agent" in span.attributes:
+ if SpanAttributes.HTTP_USER_AGENT in span.attributes:
# TODO: Not exposed in Swagger, need to update def
- envelope.tags["ai.user.userAgent"] = span.attributes["http.user_agent"]
- scheme = span.attributes.get("http.scheme")
- url = ""
- # Target
- if "http.url" in span.attributes:
- url = span.attributes["http.url"]
- # http specific logic for target
- if "peer.service" not in span.attributes:
- try:
- parse_url = urlparse(url)
- if parse_url.port == _get_default_port_http(scheme):
- target = parse_url.hostname
- else:
- target = parse_url.netloc
- except Exception: # pylint: disable=broad-except
- logger.warning("Error while parsing url.")
- # http specific logic for target
- if "peer.service" not in span.attributes and "http.host" in span.attributes:
- host = span.attributes["http.host"]
- try:
- # urlparse insists on absolute URLs starting with "//"
- # This logic assumes host does not include a "//"
- host_name = urlparse("//" + host)
- if host_name.port == _get_default_port_http(scheme):
- target = host_name.hostname
- else:
- target = host
- except Exception: # pylint: disable=broad-except
- logger.warning("Error while parsing hostname.")
+ envelope.tags["ai.user.userAgent"] = span.attributes[SpanAttributes.HTTP_USER_AGENT]
+ scheme = span.attributes.get(SpanAttributes.HTTP_SCHEME)
# url
- if not url:
- if scheme and "http.target" in span.attributes:
- http_target = span.attributes["http.target"]
- if "http.host" in span.attributes:
- url = "{}://{}{}".format(
+ url = ""
+ if SpanAttributes.HTTP_URL in span.attributes:
+ url = span.attributes[SpanAttributes.HTTP_URL]
+ elif scheme and SpanAttributes.HTTP_TARGET in span.attributes:
+ http_target = span.attributes[SpanAttributes.HTTP_TARGET]
+ if SpanAttributes.HTTP_HOST in span.attributes:
+ url = "{}://{}{}".format(
+ scheme,
+ span.attributes[SpanAttributes.HTTP_HOST],
+ http_target,
+ )
+ elif SpanAttributes.NET_PEER_PORT in span.attributes:
+ peer_port = span.attributes[SpanAttributes.NET_PEER_PORT]
+ if SpanAttributes.NET_PEER_NAME in span.attributes:
+ peer_name = span.attributes[SpanAttributes.NET_PEER_NAME]
+ url = "{}://{}:{}{}".format(
scheme,
- span.attributes["http.host"],
+ peer_name,
+ peer_port,
http_target,
)
- elif "net.peer.port" in span.attributes:
- peer_port = span.attributes["net.peer.port"]
- if "net.peer.name" in span.attributes:
- peer_name = span.attributes["net.peer.name"]
- url = "{}://{}:{}{}".format(
- scheme,
- peer_name,
- peer_port,
- http_target,
- )
- elif "net.peer.ip" in span.attributes:
- peer_ip = span.attributes["net.peer.ip"]
- url = "{}://{}:{}{}".format(
- scheme,
- peer_ip,
- peer_port,
- http_target,
- )
+ elif SpanAttributes.NET_PEER_IP in span.attributes:
+ peer_ip = span.attributes[SpanAttributes.NET_PEER_IP]
+ url = "{}://{}:{}{}".format(
+ scheme,
+ peer_ip,
+ peer_port,
+ http_target,
+ )
+ target_from_url = ""
+ path = ""
+ if url:
+ try:
+ parse_url = urlparse(url)
+ path = parse_url.path
+ if not path:
+ path = "/"
+ if parse_url.port == _get_default_port_http(scheme):
+ target_from_url = parse_url.hostname
+ else:
+ target_from_url = parse_url.netloc
+ except Exception: # pylint: disable=broad-except
+ pass
+ # http specific logic for name
+ if path:
+ data.name = "{} {}".format(
+ span.attributes[SpanAttributes.HTTP_METHOD],
+ path,
+ )
+ # http specific logic for target
+ if SpanAttributes.PEER_SERVICE not in span.attributes:
+ if SpanAttributes.HTTP_HOST in span.attributes:
+ host = span.attributes[SpanAttributes.HTTP_HOST]
+ try:
+ # urlparse insists on absolute URLs starting with "//"
+ # This logic assumes host does not include a "//"
+ host_name = urlparse("//" + host)
+ if host_name.port == _get_default_port_http(scheme):
+ target = host_name.hostname
+ else:
+ target = host
+ except Exception: # pylint: disable=broad-except
+ logger.warning("Error while parsing hostname.")
+ elif target_from_url:
+ target = target_from_url
# data is url
data.data = url
- if "http.status_code" in span.attributes:
- status_code = span.attributes["http.status_code"]
+ if SpanAttributes.HTTP_STATUS_CODE in span.attributes:
+ status_code = span.attributes[SpanAttributes.HTTP_STATUS_CODE]
data.result_code = str(status_code)
- elif "db.system" in span.attributes: # Database
- db_system = span.attributes["db.system"]
- if _is_relational_db(db_system):
- data.type = "SQL"
- else:
+ elif SpanAttributes.DB_SYSTEM in span.attributes: # Database
+ db_system = span.attributes[SpanAttributes.DB_SYSTEM]
+ if not _is_sql_db(db_system):
data.type = db_system
- # data is the full statement
- if "db.statement" in span.attributes:
- data.data = span.attributes["db.statement"]
+ else:
+ data.type = "SQL"
+ # data is the full statement or operation
+ if SpanAttributes.DB_STATEMENT in span.attributes:
+ data.data = span.attributes[SpanAttributes.DB_STATEMENT]
+ elif SpanAttributes.DB_OPERATION in span.attributes:
+ data.data = span.attributes[SpanAttributes.DB_OPERATION]
# db specific logic for target
- if "db.name" in span.attributes:
- db_name = span.attributes["db.name"]
+ if SpanAttributes.DB_NAME in span.attributes:
+ db_name = span.attributes[SpanAttributes.DB_NAME]
if target is None:
target = db_name
else:
- target = "{}/{}".format(target, db_name)
+ target = "{}|{}".format(target, db_name)
if target is None:
target = db_system
- elif "rpc.system" in span.attributes: # Rpc
- data.type = "rpc.system"
+ elif SpanAttributes.RPC_SYSTEM in span.attributes: # Rpc
+ data.type = SpanAttributes.RPC_SYSTEM
# TODO: data.data for rpc
if target is None:
- target = span.attributes["rpc.system"]
+ target = span.attributes[SpanAttributes.RPC_SYSTEM]
else:
# TODO: Azure specific types
data.type = "N/A"
@@ -334,6 +368,8 @@ def _convert_span_to_envelope(span: Span) -> TelemetryItem:
data.data = data.data[:8192]
if target:
data.target = target[:1024]
+ if data.name:
+ data.name = data.name[:1024]
for key, val in span.attributes.items():
# Remove Opentelemetry related span attributes from custom dimensions
if key.startswith("http.") or \
@@ -361,16 +397,28 @@ def _convert_span_to_envelope(span: Span) -> TelemetryItem:
return envelope
+# pylint:disable=too-many-return-statements
def _get_default_port_db(dbsystem):
- if dbsystem == "postgresql":
+ if dbsystem == DbSystemValues.POSTGRESQL.value:
return 5432
- if dbsystem == "mysql":
+ if dbsystem == DbSystemValues.CASSANDRA.value:
+ return 9042
+ if dbsystem in (DbSystemValues.MARIADB.value, DbSystemValues.MYSQL.value):
return 3306
+ if dbsystem == DbSystemValues.MSSQL.value:
+ return 1433
+    # TODO: use DbSystemValues.MEMCACHED once the semantic-conventions package defines it
if dbsystem == "memcached":
return 11211
- if dbsystem == "mongodb":
- return 27017
- if dbsystem == "redis":
+ if dbsystem == DbSystemValues.DB2.value:
+ return 50000
+ if dbsystem == DbSystemValues.ORACLE.value:
+ return 1521
+ if dbsystem == DbSystemValues.H2.value:
+ return 8082
+ if dbsystem == DbSystemValues.DERBY.value:
+ return 1527
+ if dbsystem == DbSystemValues.REDIS.value:
return 6379
return 0
@@ -383,5 +431,15 @@ def _get_default_port_http(scheme):
return 0
-def _is_relational_db(dbsystem):
- return dbsystem in ["postgresql", "mysql"]
+def _is_sql_db(dbsystem):
+ return dbsystem in (
+ DbSystemValues.DB2.value,
+ DbSystemValues.DERBY.value,
+ DbSystemValues.MARIADB.value,
+ DbSystemValues.MSSQL.value,
+ DbSystemValues.ORACLE.value,
+ DbSystemValues.SQLITE.value,
+ DbSystemValues.OTHER_SQL.value,
+ DbSystemValues.HSQLDB.value,
+ DbSystemValues.H2.value,
+ )
diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py
index cf58f037345b..3bc7ffc523a6 100644
--- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py
+++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py
@@ -243,7 +243,7 @@ def test_span_to_envelope_client_http(self):
envelope.name, "Microsoft.ApplicationInsights.RemoteDependency"
)
self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z")
- self.assertEqual(envelope.data.base_data.name, "test")
+ self.assertEqual(envelope.data.base_data.name, "GET /wiki/Rabbit")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertTrue(envelope.data.base_data.success)
@@ -258,6 +258,15 @@ def test_span_to_envelope_client_http(self):
self.assertEqual(envelope.data.base_data.result_code, "200")
self.assertEqual(envelope.tags["ai.user.userAgent"], "agent")
+ # Name empty
+ span._attributes = {
+ "http.method": "GET",
+ "http.scheme": "https",
+ "http.url": "https://www.example.com",
+ }
+ envelope = exporter._span_to_envelope(span)
+ self.assertEqual(envelope.data.base_data.name, "GET /")
+
# Target
span._attributes = {
"http.method": "GET",
@@ -339,7 +348,7 @@ def test_span_to_envelope_client_db(self):
attributes={
"db.system": "postgresql",
"peer.service": "service",
- "db.statement": "SELECT",
+ "db.statement": "SELECT * from test",
},
kind=SpanKind.CLIENT,
)
@@ -358,10 +367,19 @@ def test_span_to_envelope_client_db(self):
self.assertTrue(envelope.data.base_data.success)
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
- self.assertEqual(envelope.data.base_data.type, "SQL")
+ self.assertEqual(envelope.data.base_data.type, "postgresql")
self.assertEqual(envelope.data.base_data.target, "service")
+ self.assertEqual(envelope.data.base_data.data, "SELECT * from test")
+ self.assertEqual(envelope.data.base_data.result_code, "0")
+
+ # data
+ span._attributes = {
+ "db.system": "postgresql",
+ "peer.service": "service",
+ "db.operation": "SELECT",
+ }
+ envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.data, "SELECT")
- self.assertEqual(envelope.data.base_data.result_code, "1")
# Target
span._attributes = {
@@ -371,7 +389,7 @@ def test_span_to_envelope_client_db(self):
"peer.service": "service",
}
envelope = exporter._span_to_envelope(span)
- self.assertEqual(envelope.data.base_data.target, "service/testDb")
+ self.assertEqual(envelope.data.base_data.target, "service|testDb")
span._attributes = {
"db.system": "postgresql",
@@ -387,6 +405,16 @@ def test_span_to_envelope_client_db(self):
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.target, "postgresql")
+ # Type
+ span._attributes = {
+ "db.system": "mssql",
+ "db.statement": "SELECT",
+ "db.name": "testDb",
+ "peer.service": "service",
+ }
+ envelope = exporter._span_to_envelope(span)
+ self.assertEqual(envelope.data.base_data.type, "SQL")
+
def test_span_to_envelope_client_rpc(self):
exporter = self._exporter
start_time = 1575494316027613500
@@ -435,7 +463,7 @@ def test_span_to_envelope_client_rpc(self):
# TODO: data.data
# self.assertEqual(envelope.data.base_data.data, "SELECT")
- self.assertEqual(envelope.data.base_data.result_code, "1")
+ self.assertEqual(envelope.data.base_data.result_code, "0")
def test_span_to_envelope_producer_messaging(self):
exporter = self._exporter
@@ -477,7 +505,7 @@ def test_span_to_envelope_producer_messaging(self):
# self.assertEqual(envelope.data.base_data.target, "rpc")
# TODO: data.data
# self.assertEqual(envelope.data.base_data.data, "SELECT")
- self.assertEqual(envelope.data.base_data.result_code, "1")
+ self.assertEqual(envelope.data.base_data.result_code, "0")
def test_span_to_envelope_internal(self):
exporter = self._exporter
@@ -513,7 +541,7 @@ def test_span_to_envelope_internal(self):
self.assertEqual(envelope.data.base_type, "RemoteDependencyData")
self.assertEqual(envelope.data.base_data.type, "InProc")
- self.assertEqual(envelope.data.base_data.result_code, "1")
+ self.assertEqual(envelope.data.base_data.result_code, "0")
# type
span._parent = None
@@ -552,18 +580,16 @@ def test_span_envelope_server_http(self):
envelope.name, "Microsoft.ApplicationInsights.Request"
)
self.assertEqual(envelope.data.base_type, "RequestData")
- self.assertEqual(envelope.data.base_data.name, "test")
+ self.assertEqual(envelope.data.base_data.name, "GET /wiki/Rabbit")
self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9")
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001")
self.assertEqual(envelope.data.base_data.response_code, "200")
self.assertTrue(envelope.data.base_data.success)
- self.assertEqual(envelope.tags["ai.operation.name"], "GET test")
+ self.assertEqual(envelope.tags["ai.operation.name"], "GET /wiki/Rabbit")
self.assertEqual(envelope.tags["ai.user.userAgent"], "agent")
self.assertEqual(envelope.tags["ai.location.ip"], "client_ip")
self.assertEqual(envelope.data.base_data.url, "https://www.wikipedia.org/wiki/Rabbit")
- self.assertEqual(envelope.data.base_data.properties["request.name"], "test")
- self.assertEqual(envelope.data.base_data.properties["request.url"], "https://www.wikipedia.org/wiki/Rabbit")
# location
span._attributes = {
@@ -603,6 +629,22 @@ def test_span_envelope_server_http(self):
envelope = exporter._span_to_envelope(span)
self.assertEqual(envelope.data.base_data.url, "https://localhost:35555/path")
+ # ai.operation.name
+ span._attributes = {
+ "http.method": "GET",
+ "http.url": "https://www.wikipedia.org/wiki/Rabbit/test",
+ }
+ envelope = exporter._span_to_envelope(span)
+ self.assertEqual(envelope.tags["ai.operation.name"], "GET /wiki/Rabbit/test")
+ self.assertEqual(envelope.data.base_data.name, "GET /wiki/Rabbit/test")
+
+ span._attributes = {
+ "http.method": "GET",
+ }
+ envelope = exporter._span_to_envelope(span)
+ self.assertEqual(envelope.tags["ai.operation.name"], "test")
+ self.assertEqual(envelope.data.base_data.name, "test")
+
def test_span_envelope_server_messaging(self):
exporter = self._exporter
start_time = 1575494316027613500
diff --git a/sdk/monitor/azure-monitor-query/CHANGELOG.md b/sdk/monitor/azure-monitor-query/CHANGELOG.md
index 0969530e1cbe..9079858da9fe 100644
--- a/sdk/monitor/azure-monitor-query/CHANGELOG.md
+++ b/sdk/monitor/azure-monitor-query/CHANGELOG.md
@@ -1,6 +1,6 @@
# Release History
-## 1.0.0b5 (Unreleased)
+## 1.0.0b5 (2021-10-05)
### Features Added
@@ -9,6 +9,7 @@
- Added `LogsQueryStatus` Enum to describe the status of a result.
- Added a new `LogsTableRow` type that represents a single row in a table.
- Items in `metrics` list in `MetricsResult` can now be accessed by metric names.
+- Added `audience` keyword to support providing credential scope when creating clients.
### Breaking Changes
@@ -19,10 +20,6 @@
- `query_batch` API now returns a union of `LogsQueryPartialResult`, `LogsQueryError` and `LogsQueryResult`.
- `metric_namespace` is renamed to `namespace` and is a keyword-only argument in `list_metric_definitions` API.
-### Bugs Fixed
-
-### Other Changes
-
## 1.0.0b4 (2021-09-09)
### Features Added
diff --git a/sdk/monitor/azure-monitor-query/README.md b/sdk/monitor/azure-monitor-query/README.md
index 4f1677bd6807..2dbac055aee6 100644
--- a/sdk/monitor/azure-monitor-query/README.md
+++ b/sdk/monitor/azure-monitor-query/README.md
@@ -14,6 +14,10 @@ The Azure Monitor Query client library is used to execute read-only queries agai
- [Samples][samples]
- [Change log][changelog]
+## _Disclaimer_
+
+_Azure SDK Python packages' support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+
## Getting started
### Prerequisites
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
index 4fb0563fd3da..b23fcd8ec0c8 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
@@ -15,16 +15,19 @@
def get_authentication_policy(
- credential, # type: TokenCredential
+ credential, # type: "TokenCredential"
+ audience=None # type: str
):
# type: (...) -> BearerTokenCredentialPolicy
"""Returns the correct authentication policy"""
-
+ if not audience:
+ audience = "https://api.loganalytics.io/"
+ scope = audience.rstrip('/') + "/.default"
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if hasattr(credential, "get_token"):
return BearerTokenCredentialPolicy(
- credential, "https://api.loganalytics.io/.default"
+ credential, scope
)
raise TypeError("Unsupported credential")
@@ -32,15 +35,18 @@ def get_authentication_policy(
def get_metrics_authentication_policy(
credential, # type: TokenCredential
+ audience=None # type: str
):
# type: (...) -> BearerTokenCredentialPolicy
"""Returns the correct authentication policy"""
-
+ if not audience:
+ audience = "https://management.azure.com/"
+ scope = audience.rstrip('/') + "/.default"
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if hasattr(credential, "get_token"):
return BearerTokenCredentialPolicy(
- credential, "https://management.azure.com/.default"
+ credential, scope
)
raise TypeError("Unsupported credential")
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
index 1bc32db07d39..24db36531e8b 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
@@ -49,15 +49,20 @@ class LogsQueryClient(object):
:type credential: ~azure.core.credentials.TokenCredential
:keyword endpoint: The endpoint to connect to. Defaults to 'https://api.loganalytics.io'.
:paramtype endpoint: str
+    :keyword audience: The audience (resource URL) to use when requesting AAD tokens; if not provided, a service-specific default is used.
+ :paramtype audience: str
"""
def __init__(self, credential, **kwargs):
# type: (TokenCredential, Any) -> None
-
- self._endpoint = kwargs.pop("endpoint", "https://api.loganalytics.io/v1")
+ audience = kwargs.pop("audience", None)
+ endpoint = kwargs.pop("endpoint", "https://api.loganalytics.io/v1")
+ if not endpoint.startswith("https://") and not endpoint.startswith("http://"):
+ endpoint = "https://" + endpoint
+ self._endpoint = endpoint
self._client = MonitorQueryClient(
credential=credential,
- authentication_policy=get_authentication_policy(credential),
+ authentication_policy=get_authentication_policy(credential, audience),
base_url=self._endpoint,
**kwargs
)
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
index 171d0b495a3c..e730db1c7f26 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
@@ -44,15 +44,21 @@ class MetricsQueryClient(object):
:type credential: ~azure.core.credentials.TokenCredential
:keyword endpoint: The endpoint to connect to. Defaults to 'https://management.azure.com'.
:paramtype endpoint: str
+    :keyword audience: The audience (resource URL) to use when requesting AAD tokens; if not provided, a service-specific default is used.
+ :paramtype audience: str
"""
def __init__(self, credential, **kwargs):
# type: (TokenCredential, Any) -> None
+ audience = kwargs.pop("audience", None)
endpoint = kwargs.pop("endpoint", "https://management.azure.com")
+ if not endpoint.startswith("https://") and not endpoint.startswith("http://"):
+ endpoint = "https://" + endpoint
+ self._endpoint = endpoint
self._client = MonitorQueryClient(
credential=credential,
- base_url=endpoint,
- authentication_policy=get_metrics_authentication_policy(credential),
+ base_url=self._endpoint,
+ authentication_policy=get_metrics_authentication_policy(credential, audience),
**kwargs
)
self._metrics_op = self._client.metrics
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_helpers_asyc.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_helpers_asyc.py
index ffecbec48927..033b1c3fc585 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_helpers_asyc.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_helpers_asyc.py
@@ -13,14 +13,17 @@
def get_authentication_policy(
credential: "AsyncTokenCredential",
+ audience: str = None
) -> AsyncBearerTokenCredentialPolicy:
"""Returns the correct authentication policy"""
-
+ if not audience:
+ audience = "https://api.loganalytics.io/"
+ scope = audience.rstrip('/') + "/.default"
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if hasattr(credential, "get_token"):
return AsyncBearerTokenCredentialPolicy(
- credential, "https://api.loganalytics.io/.default"
+ credential, scope
)
raise TypeError("Unsupported credential")
@@ -28,14 +31,17 @@ def get_authentication_policy(
def get_metrics_authentication_policy(
credential: "AsyncTokenCredential",
+ audience: str = None
) -> AsyncBearerTokenCredentialPolicy:
"""Returns the correct authentication policy"""
-
+ if not audience:
+ audience = "https://management.azure.com/"
+ scope = audience.rstrip('/') + "/.default"
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if hasattr(credential, "get_token"):
return AsyncBearerTokenCredentialPolicy(
- credential, "https://management.azure.com/.default"
+ credential, scope
)
raise TypeError("Unsupported credential")
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
index 92379c5d6442..4e4df78df924 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
@@ -35,13 +35,19 @@ class LogsQueryClient(object):
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:keyword endpoint: The endpoint to connect to. Defaults to 'https://api.loganalytics.io/v1'.
:paramtype endpoint: str
+    :keyword audience: The audience (resource URL) to use when requesting AAD tokens; if not provided, a service-specific default is used.
+ :paramtype audience: str
"""
def __init__(self, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
- self._endpoint = kwargs.pop("endpoint", "https://api.loganalytics.io/v1")
+ audience = kwargs.pop("audience", None)
+ endpoint = kwargs.pop("endpoint", "https://api.loganalytics.io/v1")
+ if not endpoint.startswith("https://") and not endpoint.startswith("http://"):
+ endpoint = "https://" + endpoint
+ self._endpoint = endpoint
self._client = MonitorQueryClient(
credential=credential,
- authentication_policy=get_authentication_policy(credential),
+ authentication_policy=get_authentication_policy(credential, audience),
base_url=self._endpoint,
**kwargs
)
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
index e9345743d686..707272d39ac4 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
@@ -32,14 +32,20 @@ class MetricsQueryClient(object):
:type credential: ~azure.core.credentials.TokenCredential
:keyword endpoint: The endpoint to connect to. Defaults to 'https://management.azure.com'.
:paramtype endpoint: str
+ :keyword audience: URL to use for credential authentication with AAD.
+ :paramtype audience: str
"""
def __init__(self, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
+ audience = kwargs.pop("audience", None)
endpoint = kwargs.pop("endpoint", "https://management.azure.com")
+ if not endpoint.startswith("https://") and not endpoint.startswith("http://"):
+ endpoint = "https://" + endpoint
+ self._endpoint = endpoint
self._client = MonitorQueryClient(
credential=credential,
- base_url=endpoint,
- authentication_policy=get_metrics_authentication_policy(credential),
+ base_url=self._endpoint,
+ authentication_policy=get_metrics_authentication_policy(credential, audience),
**kwargs
)
self._metrics_op = self._client.metrics
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md b/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md
index a24aa8e035c3..7245b36e68ab 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/CHANGELOG.md
@@ -4,19 +4,20 @@
### Features Added
-- `auto_register_schemas` keyword argument has been added to `SchemaRegistryAvroSerializer`, which will allow for automatically registering schemas passed in to the `serialize`.
-- `value` parameter in `serialize` on `SchemaRegistryAvroSerializer` takes type `Mapping` rather than `Dict`.
+- `auto_register_schemas` keyword argument has been added to `AvroSerializer`, which will allow for automatically registering schemas passed in to the `serialize`.
+- `value` parameter in `serialize` on `AvroSerializer` takes type `Mapping` rather than `Dict`.
### Breaking Changes
-- `schema_registry` parameter in the `SchemaRegistryAvroSerializer` constructor has been renamed `client`.
-- `schema_group` parameter in the `SchemaRegistryAvroSerializer` constructor has been renamed `group_name`.
-- `data` parameter in the `serialize` and `deserialize` methods on `SchemaRegistryAvroSerializer` has been renamed `value`.
-- `schema` parameter in the `serialize` method on `SchemaRegistryAvroSerializer` no longer accepts argument of type `bytes`.
-- `SchemaRegistryAvroSerializer` constructor no longer takes in the `codec` keyword argument.
+- `SchemaRegistryAvroSerializer` has been renamed `AvroSerializer`.
+- `schema_registry` parameter in the `AvroSerializer` constructor has been renamed `client`.
+- `schema_group` parameter in the `AvroSerializer` constructor has been renamed `group_name`.
+- `data` parameter in the `serialize` and `deserialize` methods on `AvroSerializer` has been renamed `value`.
+- `schema` parameter in the `serialize` method on `AvroSerializer` no longer accepts argument of type `bytes`.
+- `AvroSerializer` constructor no longer takes in the `codec` keyword argument.
- The following positional arguments are now required keyword arguments:
- - `client` and `group_name` in `SchemaRegistryAvroSerializer` constructor
- - `schema` in `serialize` on `SchemaRegistryAvroSerializer`
+ - `client` and `group_name` in `AvroSerializer` constructor
+ - `schema` in `serialize` on `AvroSerializer`
### Bugs Fixed
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py
index fe999769d03e..c9a4c0074933 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/__init__.py
@@ -27,8 +27,8 @@
__version__ = VERSION
-from ._schema_registry_avro_serializer import SchemaRegistryAvroSerializer
+from ._schema_registry_avro_serializer import AvroSerializer
__all__ = [
- "SchemaRegistryAvroSerializer"
+ "AvroSerializer"
]
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py
index f316fa9d1f07..cbd6c1aefede 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/azure/schemaregistry/serializer/avroserializer/_schema_registry_avro_serializer.py
@@ -35,9 +35,9 @@
from ._avro_serializer import AvroObjectSerializer
-class SchemaRegistryAvroSerializer(object):
+class AvroSerializer(object):
"""
- SchemaRegistryAvroSerializer provides the ability to serialize and deserialize data according
+ AvroSerializer provides the ability to serialize and deserialize data according
to the given avro schema. It would automatically register, get and cache the schema.
:keyword client: Required. The schema registry client
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py
index a31e4a7b88ee..b93ebd71d450 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/avro_serializer.py
@@ -27,7 +27,7 @@
from azure.identity import ClientSecretCredential
from azure.schemaregistry import SchemaRegistryClient
-from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer
+from azure.schemaregistry.serializer.avroserializer import AvroSerializer
TENANT_ID=os.environ['AZURE_TENANT_ID']
CLIENT_ID=os.environ['AZURE_CLIENT_ID']
@@ -80,7 +80,7 @@ def deserialize(serializer, bytes_payload):
if __name__ == '__main__':
schema_registry = SchemaRegistryClient(endpoint=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE, credential=token_credential)
- serializer = SchemaRegistryAvroSerializer(client=schema_registry, group_name=GROUP_NAME, auto_register_schemas=True)
+ serializer = AvroSerializer(client=schema_registry, group_name=GROUP_NAME, auto_register_schemas=True)
bytes_data_ben, bytes_data_alice = serialize(serializer)
dict_data_ben = deserialize(serializer, bytes_data_ben)
dict_data_alice = deserialize(serializer, bytes_data_alice)
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py
index 3cbf949adf00..5bcc2c803337 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_receive_integration.py
@@ -6,7 +6,7 @@
# --------------------------------------------------------------------------------------------
"""
-Examples to show receiving events from EventHub with SchemaRegistryAvroSerializer integrated for data deserialization.
+Examples to show receiving events from EventHub with AvroSerializer integrated for data deserialization.
"""
# pylint: disable=C0111
@@ -14,7 +14,7 @@
from azure.eventhub import EventHubConsumerClient
from azure.identity import DefaultAzureCredential
from azure.schemaregistry import SchemaRegistryClient
-from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer
+from azure.schemaregistry.serializer.avroserializer import AvroSerializer
EVENTHUB_CONNECTION_STR = os.environ['EVENT_HUB_CONN_STR']
EVENTHUB_NAME = os.environ['EVENT_HUB_NAME']
@@ -44,9 +44,9 @@ def on_event(partition_context, event):
)
-# create a SchemaRegistryAvroSerializer instance
+# create an AvroSerializer instance
# TODO: after 'azure-schemaregistry==1.0.0b3' is released, update 'endpoint' to 'fully_qualified_namespace'
-avro_serializer = SchemaRegistryAvroSerializer(
+avro_serializer = AvroSerializer(
client=SchemaRegistryClient(
endpoint=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE,
credential=DefaultAzureCredential()
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py
index d184e87d89a8..2821272efe29 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/samples/eventhub_send_integration.py
@@ -6,7 +6,7 @@
# --------------------------------------------------------------------------------------------
"""
-Examples to show sending event to EventHub with SchemaRegistryAvroSerializer integrated for data serialization.
+Examples to show sending event to EventHub with AvroSerializer integrated for data serialization.
"""
# pylint: disable=C0111
@@ -15,7 +15,7 @@
from azure.eventhub import EventHubProducerClient, EventData
from azure.identity import DefaultAzureCredential
from azure.schemaregistry import SchemaRegistryClient
-from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer
+from azure.schemaregistry.serializer.avroserializer import AvroSerializer
EVENTHUB_CONNECTION_STR = os.environ['EVENT_HUB_CONN_STR']
EVENTHUB_NAME = os.environ['EVENT_HUB_NAME']
@@ -58,9 +58,9 @@ def send_event_data_batch(producer, serializer):
)
-# create a SchemaRegistryAvroSerializer instance
+# create an AvroSerializer instance
# TODO: after 'azure-schemaregistry==1.0.0b3' is released, update 'endpoint' to 'fully_qualified_namespace'
-avro_serializer = SchemaRegistryAvroSerializer(
+avro_serializer = AvroSerializer(
client=SchemaRegistryClient(
endpoint=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE,
credential=DefaultAzureCredential()
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml
index 90dc88aa96ec..cfd2920100e2 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_with_auto_register_schemas.yaml
@@ -28,7 +28,7 @@ interactions:
content-type:
- application/json
date:
- - Tue, 28 Sep 2021 22:27:25 GMT
+ - Thu, 30 Sep 2021 02:05:53 GMT
location:
- https://swathip-test-eventhubs.servicebus.windows.net:443/$schemagroups/fakegroup/schemas/example.avro.User/versions/1?api-version=2017-04
server:
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml
index 7dce4b62fbfe..0feb5392eba4 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/recordings/test_avro_serializer.test_basic_sr_avro_serializer_without_auto_register_schemas.yaml
@@ -28,7 +28,7 @@ interactions:
content-type:
- application/json
date:
- - Tue, 28 Sep 2021 22:27:26 GMT
+ - Thu, 30 Sep 2021 02:05:54 GMT
location:
- https://swathip-test-eventhubs.servicebus.windows.net:443/$schemagroups/fakegroup/schemas/example.avro.User/versions/1?api-version=2017-04
server:
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py
index bf2bca41907f..f392d8431917 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py
+++ b/sdk/schemaregistry/azure-schemaregistry-avroserializer/tests/test_avro_serializer.py
@@ -25,7 +25,7 @@
from io import BytesIO
from azure.schemaregistry import SchemaRegistryClient
-from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer
+from azure.schemaregistry.serializer.avroserializer import AvroSerializer
from azure.schemaregistry.serializer.avroserializer._avro_serializer import AvroObjectSerializer
from azure.identity import ClientSecretCredential
from azure.core.exceptions import ClientAuthenticationError, ServiceRequestError, HttpResponseError
@@ -34,7 +34,7 @@
SchemaRegistryPowerShellPreparer = functools.partial(PowerShellPreparer, "schemaregistry", schemaregistry_fully_qualified_namespace="fake_resource.servicebus.windows.net/", schemaregistry_group="fakegroup")
-class SchemaRegistryAvroSerializerTests(AzureTestCase):
+class AvroSerializerTests(AzureTestCase):
def test_raw_avro_serializer(self):
schema_str = """{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string"},{"name":"favorite_number","type":["int","null"]},{"name":"favorite_color","type":["string","null"]}]}"""
@@ -78,7 +78,7 @@ def test_raw_avro_serializer_negative(self):
def test_basic_sr_avro_serializer_with_auto_register_schemas(self, schemaregistry_fully_qualified_namespace, schemaregistry_group, **kwargs):
# TODO: AFTER RELEASING azure-schemaregistry=1.0.0b3, UPDATE 'endpoint' to 'fully_qualified_namespace'
sr_client = self.create_basic_client(SchemaRegistryClient, endpoint=schemaregistry_fully_qualified_namespace)
- sr_avro_serializer = SchemaRegistryAvroSerializer(client=sr_client, group_name=schemaregistry_group, auto_register_schemas=True)
+ sr_avro_serializer = AvroSerializer(client=sr_client, group_name=schemaregistry_group, auto_register_schemas=True)
schema_str = """{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string"},{"name":"favorite_number","type":["int","null"]},{"name":"favorite_color","type":["string","null"]}]}"""
schema = avro.schema.parse(schema_str)
@@ -103,7 +103,7 @@ def test_basic_sr_avro_serializer_with_auto_register_schemas(self, schemaregistr
def test_basic_sr_avro_serializer_without_auto_register_schemas(self, schemaregistry_fully_qualified_namespace, schemaregistry_group, **kwargs):
# TODO: AFTER RELEASING azure-schemaregistry=1.0.0b3, UPDATE 'endpoint' to 'fully_qualified_namespace'
sr_client = self.create_basic_client(SchemaRegistryClient, endpoint=schemaregistry_fully_qualified_namespace)
- sr_avro_serializer = SchemaRegistryAvroSerializer(client=sr_client, group_name=schemaregistry_group)
+ sr_avro_serializer = AvroSerializer(client=sr_client, group_name=schemaregistry_group)
schema_str = """{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string"},{"name":"favorite_number","type":["int","null"]},{"name":"favorite_color","type":["string","null"]}]}"""
schema = avro.schema.parse(schema_str)
diff --git a/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py b/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py
index c36aaed14908..80f86cb969ec 100644
--- a/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py
+++ b/sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/serializer/__init__.py
@@ -23,3 +23,4 @@
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
+__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
diff --git a/sdk/search/azure-search-documents/CHANGELOG.md b/sdk/search/azure-search-documents/CHANGELOG.md
index c5911ab8e9a2..2e1ad6ff973d 100644
--- a/sdk/search/azure-search-documents/CHANGELOG.md
+++ b/sdk/search/azure-search-documents/CHANGELOG.md
@@ -1,6 +1,6 @@
# Release History
-## 11.3.0b4 (Unreleased)
+## 11.3.0b4 (2021-10-05)
### Features Added
@@ -10,19 +10,18 @@
### Breaking Changes
- Renamed `SearchClient.speller` to `SearchClient.query_speller`.
+- Renamed model `Speller` to `QuerySpellerType`.
+- Renamed model `Answers` to `QueryAnswerType`.
- Removed keyword arguments from `SearchClient`: `answers` and `captions`.
- `SentimentSkill`, `EntityRecognitionSkill`: added client-side validation to prevent sending unsupported parameters.
-
-### Bugs Fixed
-
-### Other Changes
+- Renamed property `ignore_reset_requirements` to `skip_indexer_reset_requirement_for_cache`.
## 11.3.0b3 (2021-09-08)
### Features Added
- Added new models:
- - `azure.search.documents.models.Captions`
+ - `azure.search.documents.models.QueryCaptionType`
- `azure.search.documents.models.CaptionResult`
- `azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage`
- `azure.search.documents.indexes.models.EntityRecognitionSkillVersion`
diff --git a/sdk/search/azure-search-documents/README.md b/sdk/search/azure-search-documents/README.md
index ecda3962f4e2..abc438a29800 100644
--- a/sdk/search/azure-search-documents/README.md
+++ b/sdk/search/azure-search-documents/README.md
@@ -40,6 +40,9 @@ Use the Azure.Search.Documents client library to:
[Product documentation](https://docs.microsoft.com/azure/search/search-what-is-azure-search) |
[Samples](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/search/azure-search-documents/samples)
+## _Disclaimer_
+
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
## Getting started
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py
new file mode 100644
index 000000000000..138f663c53a4
--- /dev/null
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py
@@ -0,0 +1,27 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.pipeline.transport import HttpRequest
+
+def _convert_request(request, files=None):
+ data = request.content if not files else None
+ request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data)
+ if files:
+ request.set_formdata_body(files)
+ return request
+
+def _format_url_section(template, **kwargs):
+ components = template.split("/")
+ while components:
+ try:
+ return template.format(**kwargs)
+ except KeyError as key:
+ formatted_components = template.split("/")
+ components = [
+ c for c in formatted_components if "{}".format(key.args[0]) not in c
+ ]
+ template = "/".join(components)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py
index bd89a2bfee90..faae528b930e 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py
@@ -16,6 +16,7 @@
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
+from ..._vendor import _convert_request
from ...operations._documents_operations import build_autocomplete_get_request, build_autocomplete_post_request, build_count_request, build_get_request, build_index_request, build_search_get_request, build_search_post_request, build_suggest_get_request, build_suggest_post_request
T = TypeVar('T')
@@ -71,19 +72,20 @@ async def count(
request = build_count_request(
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.count.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('long', pipeline_response)
@@ -202,19 +204,20 @@ async def search_get(
semantic_fields=_semantic_fields,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.search_get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
@@ -263,19 +266,20 @@ async def search_post(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.search_post.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
@@ -325,19 +329,20 @@ async def get(
selected_fields=selected_fields,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
@@ -419,19 +424,20 @@ async def suggest_get(
top=_top,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.suggest_get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
@@ -480,19 +486,20 @@ async def suggest_post(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.suggest_post.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
@@ -542,19 +549,20 @@ async def index(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.index.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 207]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -636,19 +644,20 @@ async def autocomplete_get(
search_fields=_search_fields,
top=_top,
template_url=self.autocomplete_get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AutocompleteResult', pipeline_response)
@@ -697,19 +706,20 @@ async def autocomplete_post(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.autocomplete_post.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AutocompleteResult', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py
index cec096d25420..014f69d11880 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py
@@ -56,7 +56,10 @@
AutocompleteMode,
Captions,
IndexActionType,
+ QueryAnswerType,
+ QueryCaptionType,
QueryLanguage,
+ QuerySpellerType,
QueryType,
ScoringStatistics,
SearchMode,
@@ -89,7 +92,10 @@
'AutocompleteMode',
'Captions',
'IndexActionType',
+ 'QueryAnswerType',
+ 'QueryCaptionType',
'QueryLanguage',
+ 'QuerySpellerType',
'QueryType',
'ScoringStatistics',
'SearchMode',
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py
index 7993e91f7cfe..735efd768921 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py
@@ -15,9 +15,9 @@ class AnswerResult(msrest.serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar score: The score value represents how relevant the answer is to the the query relative to
other answers returned for the query.
:vartype score: float
@@ -49,6 +49,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(AnswerResult, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.score = None
@@ -84,6 +89,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(AutocompleteItem, self).__init__(**kwargs)
self.text = None
self.query_plus_text = None
@@ -92,36 +99,36 @@ def __init__(
class AutocompleteOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
- :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
- :keyword filter: An OData expression that filters the documents used to produce completed terms
+ :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :ivar filter: An OData expression that filters the documents used to produce completed terms
for the Autocomplete result.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- autocomplete query. Default is false. When set to true, the query will find terms even if
- there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete
+ query. Default is false. When set to true, the query will find terms even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by an autocomplete query in order for the query to be reported as a
- success. This parameter can be useful for ensuring search availability even for services with
- only one replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword search_fields: The list of field names to consider when querying for auto-completed
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by an autocomplete query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :vartype minimum_coverage: float
+ :ivar search_fields: The list of field names to consider when querying for auto-completed
terms. Target fields must be included in the specified suggester.
- :paramtype search_fields: list[str]
- :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
- and 100. The default is 5.
- :paramtype top: int
+ :vartype search_fields: list[str]
+ :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and
+ 100. The default is 5.
+ :vartype top: int
"""
_attribute_map = {
@@ -139,6 +146,38 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
+ auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
+ :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :keyword filter: An OData expression that filters the documents used to produce completed terms
+ for the Autocomplete result.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ autocomplete query. Default is false. When set to true, the query will find terms even if
+ there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by an autocomplete query in order for the query to be reported as a
+ success. This parameter can be useful for ensuring search availability even for services with
+ only one replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword search_fields: The list of field names to consider when querying for auto-completed
+ terms. Target fields must be included in the specified suggester.
+ :paramtype search_fields: list[str]
+ :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
+ and 100. The default is 5.
+ :paramtype top: int
+ """
super(AutocompleteOptions, self).__init__(**kwargs)
self.autocomplete_mode = kwargs.get('autocomplete_mode', None)
self.filter = kwargs.get('filter', None)
@@ -155,41 +194,41 @@ class AutocompleteRequest(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword search_text: Required. The search text on which to base autocomplete results.
- :paramtype search_text: str
- :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ :ivar search_text: Required. The search text on which to base autocomplete results.
+ :vartype search_text: str
+ :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
- :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
- :keyword filter: An OData expression that filters the documents used to produce completed terms
+ :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :ivar filter: An OData expression that filters the documents used to produce completed terms
for the Autocomplete result.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- autocomplete query. Default is false. When set to true, the query will autocomplete terms even
- if there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete
+ query. Default is false. When set to true, the query will autocomplete terms even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by an autocomplete query in order for the query to be reported as a
- success. This parameter can be useful for ensuring search availability even for services with
- only one replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword search_fields: The comma-separated list of field names to consider when querying for
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by an autocomplete query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :vartype minimum_coverage: float
+ :ivar search_fields: The comma-separated list of field names to consider when querying for
auto-completed terms. Target fields must be included in the specified suggester.
- :paramtype search_fields: str
- :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ :vartype search_fields: str
+ :ivar suggester_name: Required. The name of the suggester as specified in the suggesters
collection that's part of the index definition.
- :paramtype suggester_name: str
- :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
- and 100. The default is 5.
- :paramtype top: int
+ :vartype suggester_name: str
+ :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and
+ 100. The default is 5.
+ :vartype top: int
"""
_validation = {
@@ -214,6 +253,43 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword search_text: Required. The search text on which to base autocomplete results.
+ :paramtype search_text: str
+ :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
+ auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
+ :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :keyword filter: An OData expression that filters the documents used to produce completed terms
+ for the Autocomplete result.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ autocomplete query. Default is false. When set to true, the query will autocomplete terms even
+ if there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by an autocomplete query in order for the query to be reported as a
+ success. This parameter can be useful for ensuring search availability even for services with
+ only one replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword search_fields: The comma-separated list of field names to consider when querying for
+ auto-completed terms. Target fields must be included in the specified suggester.
+ :paramtype search_fields: str
+ :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ collection that's part of the index definition.
+ :paramtype suggester_name: str
+ :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
+ and 100. The default is 5.
+ :paramtype top: int
+ """
super(AutocompleteRequest, self).__init__(**kwargs)
self.search_text = kwargs['search_text']
self.autocomplete_mode = kwargs.get('autocomplete_mode', None)
@@ -255,6 +331,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(AutocompleteResult, self).__init__(**kwargs)
self.coverage = None
self.results = None
@@ -265,9 +343,9 @@ class CaptionResult(msrest.serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar text: A representative text passage extracted from the document most relevant to the
search query.
:vartype text: str
@@ -291,6 +369,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(CaptionResult, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.text = None
@@ -302,9 +385,9 @@ class FacetResult(msrest.serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar count: The approximate count of documents falling within the bucket described by this
facet.
:vartype count: long
@@ -323,6 +406,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(FacetResult, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.count = None
@@ -331,12 +419,12 @@ def __init__(
class IndexAction(msrest.serialization.Model):
"""Represents an index action that operates on a document.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
- :keyword action_type: The operation to perform on a document in an indexing batch. Possible
- values include: "upload", "merge", "mergeOrUpload", "delete".
- :paramtype action_type: str or ~azure.search.documents.models.IndexActionType
+ :vartype additional_properties: dict[str, any]
+ :ivar action_type: The operation to perform on a document in an indexing batch. Possible values
+ include: "upload", "merge", "mergeOrUpload", "delete".
+ :vartype action_type: str or ~azure.search.documents.models.IndexActionType
"""
_attribute_map = {
@@ -348,6 +436,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword action_type: The operation to perform on a document in an indexing batch. Possible
+ values include: "upload", "merge", "mergeOrUpload", "delete".
+ :paramtype action_type: str or ~azure.search.documents.models.IndexActionType
+ """
super(IndexAction, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.action_type = kwargs.get('action_type', None)
@@ -358,8 +454,8 @@ class IndexBatch(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword actions: Required. The actions in the batch.
- :paramtype actions: list[~azure.search.documents.models.IndexAction]
+ :ivar actions: Required. The actions in the batch.
+ :vartype actions: list[~azure.search.documents.models.IndexAction]
"""
_validation = {
@@ -374,6 +470,10 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword actions: Required. The actions in the batch.
+ :paramtype actions: list[~azure.search.documents.models.IndexAction]
+ """
super(IndexBatch, self).__init__(**kwargs)
self.actions = kwargs['actions']
@@ -402,6 +502,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexDocumentsResult, self).__init__(**kwargs)
self.results = None
@@ -446,6 +548,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexingResult, self).__init__(**kwargs)
self.key = None
self.error_message = None
@@ -456,8 +560,8 @@ def __init__(
class RequestOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
- :paramtype x_ms_client_request_id: str
+ :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :vartype x_ms_client_request_id: str
"""
_attribute_map = {
@@ -468,6 +572,10 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :paramtype x_ms_client_request_id: str
+ """
super(RequestOptions, self).__init__(**kwargs)
self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None)
@@ -531,6 +639,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchDocumentsResult, self).__init__(**kwargs)
self.count = None
self.coverage = None
@@ -572,6 +682,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchError, self).__init__(**kwargs)
self.code = None
self.message = None
@@ -581,98 +693,97 @@ def __init__(
class SearchOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ :ivar include_total_result_count: A value that specifies whether to fetch the total count of
results. Default is false. Setting this value to true may have a performance impact. Note that
the count returned is an approximation.
- :paramtype include_total_result_count: bool
- :keyword facets: The list of facet expressions to apply to the search query. Each facet
- expression contains a field name, optionally followed by a comma-separated list of name:value
- pairs.
- :paramtype facets: list[str]
- :keyword filter: The OData $filter expression to apply to the search query.
- :paramtype filter: str
- :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable
+ :vartype include_total_result_count: bool
+ :ivar facets: The list of facet expressions to apply to the search query. Each facet expression
+ contains a field name, optionally followed by a comma-separated list of name:value pairs.
+ :vartype facets: list[str]
+ :ivar filter: The OData $filter expression to apply to the search query.
+ :vartype filter: str
+ :ivar highlight_fields: The list of field names to use for hit highlights. Only searchable
fields can be used for hit highlighting.
- :paramtype highlight_fields: list[str]
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype highlight_fields: list[str]
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. Default is </em>.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. Default is <em>.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a search query in order for the query to be reported as a success. This
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a search query in order for the query to be reported as a success. This
parameter can be useful for ensuring search availability even for services with only one
replica. The default is 100.
- :paramtype minimum_coverage: float
- :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ :vartype minimum_coverage: float
+ :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each
expression can be either a field name or a call to either the geo.distance() or the
search.score() functions. Each expression can be followed by asc to indicate ascending, and
desc to indicate descending. The default is ascending order. Ties will be broken by the match
scores of documents. If no OrderBy is specified, the default sort order is descending by
document match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: list[str]
- :keyword query_type: A value that specifies the syntax of the search query. The default is
+ :vartype order_by: list[str]
+ :ivar query_type: A value that specifies the syntax of the search query. The default is
'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
"simple", "full", "semantic".
- :paramtype query_type: str or ~azure.search.documents.models.QueryType
- :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ :vartype query_type: str or ~azure.search.documents.models.QueryType
+ :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for
example, referencePointParameter) using the format name-values. For example, if the scoring
profile defines a function with a parameter called 'mylocation' the parameter string would be
"mylocation--122.2,44.8" (without the quotes).
- :paramtype scoring_parameters: list[str]
- :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ :vartype scoring_parameters: list[str]
+ :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching
documents in order to sort the results.
- :paramtype scoring_profile: str
- :keyword search_fields: The list of field names to which to scope the full-text search. When
- using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of
- each fielded search expression take precedence over any field names listed in this parameter.
- :paramtype search_fields: list[str]
- :keyword query_language: The language of the query. Possible values include: "none", "en-us".
- :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
- :keyword speller: Improve search recall by spell-correcting individual search query terms.
+ :vartype scoring_profile: str
+ :ivar search_fields: The list of field names to which to scope the full-text search. When using
+ fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each
+ fielded search expression take precedence over any field names listed in this parameter.
+ :vartype search_fields: list[str]
+ :ivar query_language: The language of the query. Possible values include: "none", "en-us".
+ :vartype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :ivar speller: Improve search recall by spell-correcting individual search query terms.
Possible values include: "none", "lexicon".
- :paramtype speller: str or ~azure.search.documents.models.Speller
- :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the
- query returns answers extracted from key passages in the highest ranked documents. The number
- of answers returned can be configured by appending the pipe character '|' followed by the
+ :vartype speller: str or ~azure.search.documents.models.Speller
+ :ivar answers: This parameter is only valid if the query type is 'semantic'. If set, the query
+ returns answers extracted from key passages in the highest ranked documents. The number of
+ answers returned can be configured by appending the pipe character '|' followed by the
'count-:code:``' option after the answers parameter value, such as
'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive".
- :paramtype answers: str or ~azure.search.documents.models.Answers
- :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ :vartype answers: str or ~azure.search.documents.models.Answers
+ :ivar search_mode: A value that specifies whether any or all of the search terms must be
matched in order to count the document as a match. Possible values include: "any", "all".
- :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
- :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ :vartype search_mode: str or ~azure.search.documents.models.SearchMode
+ :ivar scoring_statistics: A value that specifies whether we want to calculate scoring
statistics (such as document frequency) globally for more consistent scoring, or locally, for
lower latency. Possible values include: "local", "global".
- :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
- :keyword session_id: A value to be used to create a sticky session, which can help to get more
+ :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :ivar session_id: A value to be used to create a sticky session, which can help to get more
consistent results. As long as the same sessionId is used, a best-effort attempt will be made
to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
interfere with the load balancing of the requests across replicas and adversely affect the
performance of the search service. The value used as sessionId cannot start with a '_'
character.
- :paramtype session_id: str
- :keyword select: The list of fields to retrieve. If unspecified, all fields marked as
- retrievable in the schema are included.
- :paramtype select: list[str]
- :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
- If you need to scan documents in sequence, but cannot use $skip due to this limitation,
- consider using $orderby on a totally-ordered key and $filter with a range query instead.
- :paramtype skip: int
- :keyword top: The number of search results to retrieve. This can be used in conjunction with
- $skip to implement client-side paging of search results. If results are truncated due to
- server-side paging, the response will include a continuation token that can be used to issue
- another Search request for the next page of results.
- :paramtype top: int
- :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the
- query returns captions extracted from key passages in the highest ranked documents. When
- Captions is set to 'extractive', highlighting is enabled by default, and can be configured by
- appending the pipe character '|' followed by the 'highlight-' option, such as
+ :vartype session_id: str
+ :ivar select: The list of fields to retrieve. If unspecified, all fields marked as retrievable
+ in the schema are included.
+ :vartype select: list[str]
+ :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If
+ you need to scan documents in sequence, but cannot use $skip due to this limitation, consider
+ using $orderby on a totally-ordered key and $filter with a range query instead.
+ :vartype skip: int
+ :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip
+ to implement client-side paging of search results. If results are truncated due to server-side
+ paging, the response will include a continuation token that can be used to issue another Search
+ request for the next page of results.
+ :vartype top: int
+ :ivar captions: This parameter is only valid if the query type is 'semantic'. If set, the query
+ returns captions extracted from key passages in the highest ranked documents. When Captions is
+ set to 'extractive', highlighting is enabled by default, and can be configured by appending the
+ pipe character '|' followed by the 'highlight-' option, such as
'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive".
- :paramtype captions: str or ~azure.search.documents.models.Captions
- :keyword semantic_fields: The list of field names used for semantic search.
- :paramtype semantic_fields: list[str]
+ :vartype captions: str or ~azure.search.documents.models.Captions
+ :ivar semantic_fields: The list of field names used for semantic search.
+ :vartype semantic_fields: list[str]
"""
_attribute_map = {
@@ -705,6 +816,100 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ results. Default is false. Setting this value to true may have a performance impact. Note that
+ the count returned is an approximation.
+ :paramtype include_total_result_count: bool
+ :keyword facets: The list of facet expressions to apply to the search query. Each facet
+ expression contains a field name, optionally followed by a comma-separated list of name:value
+ pairs.
+ :paramtype facets: list[str]
+ :keyword filter: The OData $filter expression to apply to the search query.
+ :paramtype filter: str
+ :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable
+ fields can be used for hit highlighting.
+ :paramtype highlight_fields: list[str]
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. Default is </em>.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. Default is <em>.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a search query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 100.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ expression can be either a field name or a call to either the geo.distance() or the
+ search.score() functions. Each expression can be followed by asc to indicate ascending, and
+ desc to indicate descending. The default is ascending order. Ties will be broken by the match
+ scores of documents. If no OrderBy is specified, the default sort order is descending by
+ document match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: list[str]
+ :keyword query_type: A value that specifies the syntax of the search query. The default is
+ 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
+ "simple", "full", "semantic".
+ :paramtype query_type: str or ~azure.search.documents.models.QueryType
+ :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ example, referencePointParameter) using the format name-values. For example, if the scoring
+ profile defines a function with a parameter called 'mylocation' the parameter string would be
+ "mylocation--122.2,44.8" (without the quotes).
+ :paramtype scoring_parameters: list[str]
+ :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ documents in order to sort the results.
+ :paramtype scoring_profile: str
+ :keyword search_fields: The list of field names to which to scope the full-text search. When
+ using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of
+ each fielded search expression take precedence over any field names listed in this parameter.
+ :paramtype search_fields: list[str]
+ :keyword query_language: The language of the query. Possible values include: "none", "en-us".
+ :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :keyword speller: Improve search recall by spell-correcting individual search query terms.
+ Possible values include: "none", "lexicon".
+ :paramtype speller: str or ~azure.search.documents.models.Speller
+ :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the
+ query returns answers extracted from key passages in the highest ranked documents. The number
+ of answers returned can be configured by appending the pipe character '|' followed by the
+ 'count-:code:``' option after the answers parameter value, such as
+ 'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive".
+ :paramtype answers: str or ~azure.search.documents.models.Answers
+ :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ matched in order to count the document as a match. Possible values include: "any", "all".
+ :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
+ :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ statistics (such as document frequency) globally for more consistent scoring, or locally, for
+ lower latency. Possible values include: "local", "global".
+ :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :keyword session_id: A value to be used to create a sticky session, which can help to get more
+ consistent results. As long as the same sessionId is used, a best-effort attempt will be made
+ to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
+ interfere with the load balancing of the requests across replicas and adversely affect the
+ performance of the search service. The value used as sessionId cannot start with a '_'
+ character.
+ :paramtype session_id: str
+ :keyword select: The list of fields to retrieve. If unspecified, all fields marked as
+ retrievable in the schema are included.
+ :paramtype select: list[str]
+ :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
+ If you need to scan documents in sequence, but cannot use $skip due to this limitation,
+ consider using $orderby on a totally-ordered key and $filter with a range query instead.
+ :paramtype skip: int
+ :keyword top: The number of search results to retrieve. This can be used in conjunction with
+ $skip to implement client-side paging of search results. If results are truncated due to
+ server-side paging, the response will include a continuation token that can be used to issue
+ another Search request for the next page of results.
+ :paramtype top: int
+ :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the
+ query returns captions extracted from key passages in the highest ranked documents. When
+ Captions is set to 'extractive', highlighting is enabled by default, and can be configured by
+ appending the pipe character '|' followed by the 'highlight-' option, such as
+ 'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive".
+ :paramtype captions: str or ~azure.search.documents.models.Captions
+ :keyword semantic_fields: The list of field names used for semantic search.
+ :paramtype semantic_fields: list[str]
+ """
super(SearchOptions, self).__init__(**kwargs)
self.include_total_result_count = kwargs.get('include_total_result_count', None)
self.facets = kwargs.get('facets', None)
@@ -734,99 +939,98 @@ def __init__(
class SearchRequest(msrest.serialization.Model):
"""Parameters for filtering, sorting, faceting, paging, and other search query behaviors.
- :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ :ivar include_total_result_count: A value that specifies whether to fetch the total count of
results. Default is false. Setting this value to true may have a performance impact. Note that
the count returned is an approximation.
- :paramtype include_total_result_count: bool
- :keyword facets: The list of facet expressions to apply to the search query. Each facet
- expression contains a field name, optionally followed by a comma-separated list of name:value
- pairs.
- :paramtype facets: list[str]
- :keyword filter: The OData $filter expression to apply to the search query.
- :paramtype filter: str
- :keyword highlight_fields: The comma-separated list of field names to use for hit highlights.
- Only searchable fields can be used for hit highlighting.
- :paramtype highlight_fields: str
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype include_total_result_count: bool
+ :ivar facets: The list of facet expressions to apply to the search query. Each facet expression
+ contains a field name, optionally followed by a comma-separated list of name:value pairs.
+ :vartype facets: list[str]
+ :ivar filter: The OData $filter expression to apply to the search query.
+ :vartype filter: str
+ :ivar highlight_fields: The comma-separated list of field names to use for hit highlights. Only
+ searchable fields can be used for hit highlighting.
+ :vartype highlight_fields: str
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. Default is </em>.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. Default is <em>.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a search query in order for the query to be reported as a success. This
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a search query in order for the query to be reported as a success. This
parameter can be useful for ensuring search availability even for services with only one
replica. The default is 100.
- :paramtype minimum_coverage: float
- :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ :vartype minimum_coverage: float
+ :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the
results. Each expression can be either a field name or a call to either the geo.distance() or
the search.score() functions. Each expression can be followed by asc to indicate ascending, or
desc to indicate descending. The default is ascending order. Ties will be broken by the match
scores of documents. If no $orderby is specified, the default sort order is descending by
document match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: str
- :keyword query_type: A value that specifies the syntax of the search query. The default is
+ :vartype order_by: str
+ :ivar query_type: A value that specifies the syntax of the search query. The default is
'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
"simple", "full", "semantic".
- :paramtype query_type: str or ~azure.search.documents.models.QueryType
- :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ :vartype query_type: str or ~azure.search.documents.models.QueryType
+ :ivar scoring_statistics: A value that specifies whether we want to calculate scoring
statistics (such as document frequency) globally for more consistent scoring, or locally, for
lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally
before scoring. Using global scoring statistics can increase latency of search queries.
Possible values include: "local", "global".
- :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
- :keyword session_id: A value to be used to create a sticky session, which can help getting more
+ :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :ivar session_id: A value to be used to create a sticky session, which can help getting more
consistent results. As long as the same sessionId is used, a best-effort attempt will be made
to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
interfere with the load balancing of the requests across replicas and adversely affect the
performance of the search service. The value used as sessionId cannot start with a '_'
character.
- :paramtype session_id: str
- :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ :vartype session_id: str
+ :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for
example, referencePointParameter) using the format name-values. For example, if the scoring
profile defines a function with a parameter called 'mylocation' the parameter string would be
"mylocation--122.2,44.8" (without the quotes).
- :paramtype scoring_parameters: list[str]
- :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ :vartype scoring_parameters: list[str]
+ :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching
documents in order to sort the results.
- :paramtype scoring_profile: str
- :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to
- match all documents.
- :paramtype search_text: str
- :keyword search_fields: The comma-separated list of field names to which to scope the full-text
+ :vartype scoring_profile: str
+ :ivar search_text: A full-text search query expression; Use "*" or omit this parameter to match
+ all documents.
+ :vartype search_text: str
+ :ivar search_fields: The comma-separated list of field names to which to scope the full-text
search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the
field names of each fielded search expression take precedence over any field names listed in
this parameter.
- :paramtype search_fields: str
- :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ :vartype search_fields: str
+ :ivar search_mode: A value that specifies whether any or all of the search terms must be
matched in order to count the document as a match. Possible values include: "any", "all".
- :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
- :keyword query_language: A value that specifies the language of the search query. Possible
- values include: "none", "en-us".
- :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
- :keyword speller: A value that specified the type of the speller to use to spell-correct
+ :vartype search_mode: str or ~azure.search.documents.models.SearchMode
+ :ivar query_language: A value that specifies the language of the search query. Possible values
+ include: "none", "en-us".
+ :vartype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :ivar speller: A value that specified the type of the speller to use to spell-correct
individual search query terms. Possible values include: "none", "lexicon".
- :paramtype speller: str or ~azure.search.documents.models.Speller
- :keyword answers: A value that specifies whether answers should be returned as part of the
- search response. Possible values include: "none", "extractive".
- :paramtype answers: str or ~azure.search.documents.models.Answers
- :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields
- marked as retrievable in the schema are included.
- :paramtype select: str
- :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
- If you need to scan documents in sequence, but cannot use skip due to this limitation, consider
+ :vartype speller: str or ~azure.search.documents.models.QuerySpellerType
+ :ivar answers: A value that specifies whether answers should be returned as part of the search
+ response. Possible values include: "none", "extractive".
+ :vartype answers: str or ~azure.search.documents.models.QueryAnswerType
+ :ivar select: The comma-separated list of fields to retrieve. If unspecified, all fields marked
+ as retrievable in the schema are included.
+ :vartype select: str
+ :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If
+ you need to scan documents in sequence, but cannot use skip due to this limitation, consider
using orderby on a totally-ordered key and filter with a range query instead.
- :paramtype skip: int
- :keyword top: The number of search results to retrieve. This can be used in conjunction with
- $skip to implement client-side paging of search results. If results are truncated due to
- server-side paging, the response will include a continuation token that can be used to issue
- another Search request for the next page of results.
- :paramtype top: int
- :keyword captions: A value that specifies whether captions should be returned as part of the
+ :vartype skip: int
+ :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip
+ to implement client-side paging of search results. If results are truncated due to server-side
+ paging, the response will include a continuation token that can be used to issue another Search
+ request for the next page of results.
+ :vartype top: int
+ :ivar captions: A value that specifies whether captions should be returned as part of the
search response. Possible values include: "none", "extractive".
- :paramtype captions: str or ~azure.search.documents.models.Captions
- :keyword semantic_fields: The comma-separated list of field names used for semantic search.
- :paramtype semantic_fields: str
+ :vartype captions: str or ~azure.search.documents.models.QueryCaptionType
+ :ivar semantic_fields: The comma-separated list of field names used for semantic search.
+ :vartype semantic_fields: str
"""
_attribute_map = {
@@ -860,6 +1064,101 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ results. Default is false. Setting this value to true may have a performance impact. Note that
+ the count returned is an approximation.
+ :paramtype include_total_result_count: bool
+ :keyword facets: The list of facet expressions to apply to the search query. Each facet
+ expression contains a field name, optionally followed by a comma-separated list of name:value
+ pairs.
+ :paramtype facets: list[str]
+ :keyword filter: The OData $filter expression to apply to the search query.
+ :paramtype filter: str
+ :keyword highlight_fields: The comma-separated list of field names to use for hit highlights.
+ Only searchable fields can be used for hit highlighting.
+ :paramtype highlight_fields: str
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. Default is </em>.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. Default is <em>.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a search query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 100.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ results. Each expression can be either a field name or a call to either the geo.distance() or
+ the search.score() functions. Each expression can be followed by asc to indicate ascending, or
+ desc to indicate descending. The default is ascending order. Ties will be broken by the match
+ scores of documents. If no $orderby is specified, the default sort order is descending by
+ document match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: str
+ :keyword query_type: A value that specifies the syntax of the search query. The default is
+ 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
+ "simple", "full", "semantic".
+ :paramtype query_type: str or ~azure.search.documents.models.QueryType
+ :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ statistics (such as document frequency) globally for more consistent scoring, or locally, for
+ lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally
+ before scoring. Using global scoring statistics can increase latency of search queries.
+ Possible values include: "local", "global".
+ :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :keyword session_id: A value to be used to create a sticky session, which can help getting more
+ consistent results. As long as the same sessionId is used, a best-effort attempt will be made
+ to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
+ interfere with the load balancing of the requests across replicas and adversely affect the
+ performance of the search service. The value used as sessionId cannot start with a '_'
+ character.
+ :paramtype session_id: str
+ :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ example, referencePointParameter) using the format name-values. For example, if the scoring
+ profile defines a function with a parameter called 'mylocation' the parameter string would be
+ "mylocation--122.2,44.8" (without the quotes).
+ :paramtype scoring_parameters: list[str]
+ :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ documents in order to sort the results.
+ :paramtype scoring_profile: str
+ :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to
+ match all documents.
+ :paramtype search_text: str
+ :keyword search_fields: The comma-separated list of field names to which to scope the full-text
+ search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the
+ field names of each fielded search expression take precedence over any field names listed in
+ this parameter.
+ :paramtype search_fields: str
+ :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ matched in order to count the document as a match. Possible values include: "any", "all".
+ :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
+ :keyword query_language: A value that specifies the language of the search query. Possible
+ values include: "none", "en-us".
+ :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :keyword speller: A value that specified the type of the speller to use to spell-correct
+ individual search query terms. Possible values include: "none", "lexicon".
+ :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType
+ :keyword answers: A value that specifies whether answers should be returned as part of the
+ search response. Possible values include: "none", "extractive".
+ :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType
+ :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields
+ marked as retrievable in the schema are included.
+ :paramtype select: str
+ :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
+ If you need to scan documents in sequence, but cannot use skip due to this limitation, consider
+ using orderby on a totally-ordered key and filter with a range query instead.
+ :paramtype skip: int
+ :keyword top: The number of search results to retrieve. This can be used in conjunction with
+ $skip to implement client-side paging of search results. If results are truncated due to
+ server-side paging, the response will include a continuation token that can be used to issue
+ another Search request for the next page of results.
+ :paramtype top: int
+ :keyword captions: A value that specifies whether captions should be returned as part of the
+ search response. Possible values include: "none", "extractive".
+ :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType
+ :keyword semantic_fields: The comma-separated list of field names used for semantic search.
+ :paramtype semantic_fields: str
+ """
super(SearchRequest, self).__init__(**kwargs)
self.include_total_result_count = kwargs.get('include_total_result_count', None)
self.facets = kwargs.get('facets', None)
@@ -894,9 +1193,9 @@ class SearchResult(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar score: Required. The relevance score of the document compared to other documents returned
by the query.
:vartype score: float
@@ -932,6 +1231,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(SearchResult, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.score = None
@@ -968,6 +1272,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SuggestDocumentsResult, self).__init__(**kwargs)
self.results = None
self.coverage = None
@@ -976,41 +1282,41 @@ def __init__(
class SuggestOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword filter: An OData expression that filters the documents considered for suggestions.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- suggestions query. Default is false. When set to true, the query will find terms even if
- there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :ivar filter: An OData expression that filters the documents considered for suggestions.
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions
+ query. Default is false. When set to true, the query will find terms even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a suggestions query in order for the query to be reported as a success.
- This parameter can be useful for ensuring search availability even for services with only one
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a suggestions query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ :vartype minimum_coverage: float
+ :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each
expression can be either a field name or a call to either the geo.distance() or the
search.score() functions. Each expression can be followed by asc to indicate ascending, or desc
to indicate descending. The default is ascending order. Ties will be broken by the match scores
of documents. If no $orderby is specified, the default sort order is descending by document
match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: list[str]
- :keyword search_fields: The list of field names to search for the specified search text. Target
+ :vartype order_by: list[str]
+ :ivar search_fields: The list of field names to search for the specified search text. Target
fields must be included in the specified suggester.
- :paramtype search_fields: list[str]
- :keyword select: The list of fields to retrieve. If unspecified, only the key field will be
+ :vartype search_fields: list[str]
+ :ivar select: The list of fields to retrieve. If unspecified, only the key field will be
included in the results.
- :paramtype select: list[str]
- :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and
- 100. The default is 5.
- :paramtype top: int
+ :vartype select: list[str]
+ :ivar top: The number of suggestions to retrieve. The value must be a number between 1 and 100.
+ The default is 5.
+ :vartype top: int
"""
_attribute_map = {
@@ -1029,6 +1335,43 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword filter: An OData expression that filters the documents considered for suggestions.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ suggestions query. Default is false. When set to true, the query will find terms even if
+ there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a suggestions query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ expression can be either a field name or a call to either the geo.distance() or the
+ search.score() functions. Each expression can be followed by asc to indicate ascending, or desc
+ to indicate descending. The default is ascending order. Ties will be broken by the match scores
+ of documents. If no $orderby is specified, the default sort order is descending by document
+ match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: list[str]
+ :keyword search_fields: The list of field names to search for the specified search text. Target
+ fields must be included in the specified suggester.
+ :paramtype search_fields: list[str]
+ :keyword select: The list of fields to retrieve. If unspecified, only the key field will be
+ included in the results.
+ :paramtype select: list[str]
+ :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and
+ 100. The default is 5.
+ :paramtype top: int
+ """
super(SuggestOptions, self).__init__(**kwargs)
self.filter = kwargs.get('filter', None)
self.use_fuzzy_matching = kwargs.get('use_fuzzy_matching', None)
@@ -1046,47 +1389,47 @@ class SuggestRequest(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword filter: An OData expression that filters the documents considered for suggestions.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- suggestion query. Default is false. When set to true, the query will find suggestions even if
- there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :ivar filter: An OData expression that filters the documents considered for suggestions.
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestion
+ query. Default is false. When set to true, the query will find suggestions even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a suggestion query in order for the query to be reported as a success.
- This parameter can be useful for ensuring search availability even for services with only one
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a suggestion query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ :vartype minimum_coverage: float
+ :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the
results. Each expression can be either a field name or a call to either the geo.distance() or
the search.score() functions. Each expression can be followed by asc to indicate ascending, or
desc to indicate descending. The default is ascending order. Ties will be broken by the match
scores of documents. If no $orderby is specified, the default sort order is descending by
document match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: str
- :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1
+ :vartype order_by: str
+ :ivar search_text: Required. The search text to use to suggest documents. Must be at least 1
character, and no more than 100 characters.
- :paramtype search_text: str
- :keyword search_fields: The comma-separated list of field names to search for the specified
- search text. Target fields must be included in the specified suggester.
- :paramtype search_fields: str
- :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key
+ :vartype search_text: str
+ :ivar search_fields: The comma-separated list of field names to search for the specified search
+ text. Target fields must be included in the specified suggester.
+ :vartype search_fields: str
+ :ivar select: The comma-separated list of fields to retrieve. If unspecified, only the key
field will be included in the results.
- :paramtype select: str
- :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ :vartype select: str
+ :ivar suggester_name: Required. The name of the suggester as specified in the suggesters
collection that's part of the index definition.
- :paramtype suggester_name: str
- :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100.
- The default is 5.
- :paramtype top: int
+ :vartype suggester_name: str
+ :ivar top: The number of suggestions to retrieve. This must be a value between 1 and 100. The
+ default is 5.
+ :vartype top: int
"""
_validation = {
@@ -1112,6 +1455,49 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword filter: An OData expression that filters the documents considered for suggestions.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ suggestion query. Default is false. When set to true, the query will find suggestions even if
+ there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a suggestion query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ results. Each expression can be either a field name or a call to either the geo.distance() or
+ the search.score() functions. Each expression can be followed by asc to indicate ascending, or
+ desc to indicate descending. The default is ascending order. Ties will be broken by the match
+ scores of documents. If no $orderby is specified, the default sort order is descending by
+ document match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: str
+ :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1
+ character, and no more than 100 characters.
+ :paramtype search_text: str
+ :keyword search_fields: The comma-separated list of field names to search for the specified
+ search text. Target fields must be included in the specified suggester.
+ :paramtype search_fields: str
+ :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key
+ field will be included in the results.
+ :paramtype select: str
+ :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ collection that's part of the index definition.
+ :paramtype suggester_name: str
+ :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100.
+ The default is 5.
+ :paramtype top: int
+ """
super(SuggestRequest, self).__init__(**kwargs)
self.filter = kwargs.get('filter', None)
self.use_fuzzy_matching = kwargs.get('use_fuzzy_matching', None)
@@ -1133,9 +1519,9 @@ class SuggestResult(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar text: Required. The text of the suggestion result.
:vartype text: str
"""
@@ -1153,6 +1539,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(SuggestResult, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.text = None
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py
index e5e0ece35849..1f17711c1f28 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py
@@ -19,9 +19,9 @@ class AnswerResult(msrest.serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar score: The score value represents how relevant the answer is to the the query relative to
other answers returned for the query.
:vartype score: float
@@ -55,6 +55,11 @@ def __init__(
additional_properties: Optional[Dict[str, Any]] = None,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(AnswerResult, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.score = None
@@ -90,6 +95,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(AutocompleteItem, self).__init__(**kwargs)
self.text = None
self.query_plus_text = None
@@ -98,36 +105,36 @@ def __init__(
class AutocompleteOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
- :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
- :keyword filter: An OData expression that filters the documents used to produce completed terms
+ :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :ivar filter: An OData expression that filters the documents used to produce completed terms
for the Autocomplete result.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- autocomplete query. Default is false. When set to true, the query will find terms even if
- there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete
+ query. Default is false. When set to true, the query will find terms even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by an autocomplete query in order for the query to be reported as a
- success. This parameter can be useful for ensuring search availability even for services with
- only one replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword search_fields: The list of field names to consider when querying for auto-completed
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by an autocomplete query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :vartype minimum_coverage: float
+ :ivar search_fields: The list of field names to consider when querying for auto-completed
terms. Target fields must be included in the specified suggester.
- :paramtype search_fields: list[str]
- :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
- and 100. The default is 5.
- :paramtype top: int
+ :vartype search_fields: list[str]
+ :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and
+ 100. The default is 5.
+ :vartype top: int
"""
_attribute_map = {
@@ -154,6 +161,38 @@ def __init__(
top: Optional[int] = None,
**kwargs
):
+ """
+ :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
+ auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
+ :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :keyword filter: An OData expression that filters the documents used to produce completed terms
+ for the Autocomplete result.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ autocomplete query. Default is false. When set to true, the query will find terms even if
+ there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by an autocomplete query in order for the query to be reported as a
+ success. This parameter can be useful for ensuring search availability even for services with
+ only one replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword search_fields: The list of field names to consider when querying for auto-completed
+ terms. Target fields must be included in the specified suggester.
+ :paramtype search_fields: list[str]
+ :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
+ and 100. The default is 5.
+ :paramtype top: int
+ """
super(AutocompleteOptions, self).__init__(**kwargs)
self.autocomplete_mode = autocomplete_mode
self.filter = filter
@@ -170,41 +209,41 @@ class AutocompleteRequest(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword search_text: Required. The search text on which to base autocomplete results.
- :paramtype search_text: str
- :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ :ivar search_text: Required. The search text on which to base autocomplete results.
+ :vartype search_text: str
+ :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
- :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
- :keyword filter: An OData expression that filters the documents used to produce completed terms
+ :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :ivar filter: An OData expression that filters the documents used to produce completed terms
for the Autocomplete result.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- autocomplete query. Default is false. When set to true, the query will autocomplete terms even
- if there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete
+ query. Default is false. When set to true, the query will autocomplete terms even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by an autocomplete query in order for the query to be reported as a
- success. This parameter can be useful for ensuring search availability even for services with
- only one replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword search_fields: The comma-separated list of field names to consider when querying for
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by an autocomplete query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :vartype minimum_coverage: float
+ :ivar search_fields: The comma-separated list of field names to consider when querying for
auto-completed terms. Target fields must be included in the specified suggester.
- :paramtype search_fields: str
- :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ :vartype search_fields: str
+ :ivar suggester_name: Required. The name of the suggester as specified in the suggesters
collection that's part of the index definition.
- :paramtype suggester_name: str
- :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
- and 100. The default is 5.
- :paramtype top: int
+ :vartype suggester_name: str
+ :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and
+ 100. The default is 5.
+ :vartype top: int
"""
_validation = {
@@ -240,6 +279,43 @@ def __init__(
top: Optional[int] = None,
**kwargs
):
+ """
+ :keyword search_text: Required. The search text on which to base autocomplete results.
+ :paramtype search_text: str
+ :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
+ 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
+ auto-completed terms. Possible values include: "oneTerm", "twoTerms", "oneTermWithContext".
+ :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode
+ :keyword filter: An OData expression that filters the documents used to produce completed terms
+ for the Autocomplete result.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ autocomplete query. Default is false. When set to true, the query will autocomplete terms even
+ if there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by an autocomplete query in order for the query to be reported as a
+ success. This parameter can be useful for ensuring search availability even for services with
+ only one replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword search_fields: The comma-separated list of field names to consider when querying for
+ auto-completed terms. Target fields must be included in the specified suggester.
+ :paramtype search_fields: str
+ :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ collection that's part of the index definition.
+ :paramtype suggester_name: str
+ :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1
+ and 100. The default is 5.
+ :paramtype top: int
+ """
super(AutocompleteRequest, self).__init__(**kwargs)
self.search_text = search_text
self.autocomplete_mode = autocomplete_mode
@@ -281,6 +357,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(AutocompleteResult, self).__init__(**kwargs)
self.coverage = None
self.results = None
@@ -291,9 +369,9 @@ class CaptionResult(msrest.serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar text: A representative text passage extracted from the document most relevant to the
search query.
:vartype text: str
@@ -319,6 +397,11 @@ def __init__(
additional_properties: Optional[Dict[str, Any]] = None,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(CaptionResult, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.text = None
@@ -330,9 +413,9 @@ class FacetResult(msrest.serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar count: The approximate count of documents falling within the bucket described by this
facet.
:vartype count: long
@@ -353,6 +436,11 @@ def __init__(
additional_properties: Optional[Dict[str, Any]] = None,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(FacetResult, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.count = None
@@ -361,12 +449,12 @@ def __init__(
class IndexAction(msrest.serialization.Model):
"""Represents an index action that operates on a document.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
- :keyword action_type: The operation to perform on a document in an indexing batch. Possible
- values include: "upload", "merge", "mergeOrUpload", "delete".
- :paramtype action_type: str or ~azure.search.documents.models.IndexActionType
+ :vartype additional_properties: dict[str, any]
+ :ivar action_type: The operation to perform on a document in an indexing batch. Possible values
+ include: "upload", "merge", "mergeOrUpload", "delete".
+ :vartype action_type: str or ~azure.search.documents.models.IndexActionType
"""
_attribute_map = {
@@ -381,6 +469,14 @@ def __init__(
action_type: Optional[Union[str, "IndexActionType"]] = None,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword action_type: The operation to perform on a document in an indexing batch. Possible
+ values include: "upload", "merge", "mergeOrUpload", "delete".
+ :paramtype action_type: str or ~azure.search.documents.models.IndexActionType
+ """
super(IndexAction, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.action_type = action_type
@@ -391,8 +487,8 @@ class IndexBatch(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword actions: Required. The actions in the batch.
- :paramtype actions: list[~azure.search.documents.models.IndexAction]
+ :ivar actions: Required. The actions in the batch.
+ :vartype actions: list[~azure.search.documents.models.IndexAction]
"""
_validation = {
@@ -409,6 +505,10 @@ def __init__(
actions: List["IndexAction"],
**kwargs
):
+ """
+ :keyword actions: Required. The actions in the batch.
+ :paramtype actions: list[~azure.search.documents.models.IndexAction]
+ """
super(IndexBatch, self).__init__(**kwargs)
self.actions = actions
@@ -437,6 +537,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexDocumentsResult, self).__init__(**kwargs)
self.results = None
@@ -481,6 +583,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexingResult, self).__init__(**kwargs)
self.key = None
self.error_message = None
@@ -491,8 +595,8 @@ def __init__(
class RequestOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
- :paramtype x_ms_client_request_id: str
+ :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :vartype x_ms_client_request_id: str
"""
_attribute_map = {
@@ -505,6 +609,10 @@ def __init__(
x_ms_client_request_id: Optional[str] = None,
**kwargs
):
+ """
+ :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :paramtype x_ms_client_request_id: str
+ """
super(RequestOptions, self).__init__(**kwargs)
self.x_ms_client_request_id = x_ms_client_request_id
@@ -568,6 +676,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchDocumentsResult, self).__init__(**kwargs)
self.count = None
self.coverage = None
@@ -609,6 +719,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchError, self).__init__(**kwargs)
self.code = None
self.message = None
@@ -618,98 +730,97 @@ def __init__(
class SearchOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ :ivar include_total_result_count: A value that specifies whether to fetch the total count of
results. Default is false. Setting this value to true may have a performance impact. Note that
the count returned is an approximation.
- :paramtype include_total_result_count: bool
- :keyword facets: The list of facet expressions to apply to the search query. Each facet
- expression contains a field name, optionally followed by a comma-separated list of name:value
- pairs.
- :paramtype facets: list[str]
- :keyword filter: The OData $filter expression to apply to the search query.
- :paramtype filter: str
- :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable
+ :vartype include_total_result_count: bool
+ :ivar facets: The list of facet expressions to apply to the search query. Each facet expression
+ contains a field name, optionally followed by a comma-separated list of name:value pairs.
+ :vartype facets: list[str]
+ :ivar filter: The OData $filter expression to apply to the search query.
+ :vartype filter: str
+ :ivar highlight_fields: The list of field names to use for hit highlights. Only searchable
fields can be used for hit highlighting.
- :paramtype highlight_fields: list[str]
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype highlight_fields: list[str]
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. Default is </em>.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. Default is <em>.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a search query in order for the query to be reported as a success. This
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a search query in order for the query to be reported as a success. This
parameter can be useful for ensuring search availability even for services with only one
replica. The default is 100.
- :paramtype minimum_coverage: float
- :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ :vartype minimum_coverage: float
+ :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each
expression can be either a field name or a call to either the geo.distance() or the
search.score() functions. Each expression can be followed by asc to indicate ascending, and
desc to indicate descending. The default is ascending order. Ties will be broken by the match
scores of documents. If no OrderBy is specified, the default sort order is descending by
document match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: list[str]
- :keyword query_type: A value that specifies the syntax of the search query. The default is
+ :vartype order_by: list[str]
+ :ivar query_type: A value that specifies the syntax of the search query. The default is
'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
"simple", "full", "semantic".
- :paramtype query_type: str or ~azure.search.documents.models.QueryType
- :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ :vartype query_type: str or ~azure.search.documents.models.QueryType
+ :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for
example, referencePointParameter) using the format name-values. For example, if the scoring
profile defines a function with a parameter called 'mylocation' the parameter string would be
"mylocation--122.2,44.8" (without the quotes).
- :paramtype scoring_parameters: list[str]
- :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ :vartype scoring_parameters: list[str]
+ :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching
documents in order to sort the results.
- :paramtype scoring_profile: str
- :keyword search_fields: The list of field names to which to scope the full-text search. When
- using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of
- each fielded search expression take precedence over any field names listed in this parameter.
- :paramtype search_fields: list[str]
- :keyword query_language: The language of the query. Possible values include: "none", "en-us".
- :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
- :keyword speller: Improve search recall by spell-correcting individual search query terms.
+ :vartype scoring_profile: str
+ :ivar search_fields: The list of field names to which to scope the full-text search. When using
+ fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each
+ fielded search expression take precedence over any field names listed in this parameter.
+ :vartype search_fields: list[str]
+ :ivar query_language: The language of the query. Possible values include: "none", "en-us".
+ :vartype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :ivar speller: Improve search recall by spell-correcting individual search query terms.
Possible values include: "none", "lexicon".
- :paramtype speller: str or ~azure.search.documents.models.Speller
- :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the
- query returns answers extracted from key passages in the highest ranked documents. The number
- of answers returned can be configured by appending the pipe character '|' followed by the
+ :vartype speller: str or ~azure.search.documents.models.Speller
+ :ivar answers: This parameter is only valid if the query type is 'semantic'. If set, the query
+ returns answers extracted from key passages in the highest ranked documents. The number of
+ answers returned can be configured by appending the pipe character '|' followed by the
'count-:code:``' option after the answers parameter value, such as
'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive".
- :paramtype answers: str or ~azure.search.documents.models.Answers
- :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ :vartype answers: str or ~azure.search.documents.models.Answers
+ :ivar search_mode: A value that specifies whether any or all of the search terms must be
matched in order to count the document as a match. Possible values include: "any", "all".
- :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
- :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ :vartype search_mode: str or ~azure.search.documents.models.SearchMode
+ :ivar scoring_statistics: A value that specifies whether we want to calculate scoring
statistics (such as document frequency) globally for more consistent scoring, or locally, for
lower latency. Possible values include: "local", "global".
- :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
- :keyword session_id: A value to be used to create a sticky session, which can help to get more
+ :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :ivar session_id: A value to be used to create a sticky session, which can help to get more
consistent results. As long as the same sessionId is used, a best-effort attempt will be made
to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
interfere with the load balancing of the requests across replicas and adversely affect the
performance of the search service. The value used as sessionId cannot start with a '_'
character.
- :paramtype session_id: str
- :keyword select: The list of fields to retrieve. If unspecified, all fields marked as
- retrievable in the schema are included.
- :paramtype select: list[str]
- :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
- If you need to scan documents in sequence, but cannot use $skip due to this limitation,
- consider using $orderby on a totally-ordered key and $filter with a range query instead.
- :paramtype skip: int
- :keyword top: The number of search results to retrieve. This can be used in conjunction with
- $skip to implement client-side paging of search results. If results are truncated due to
- server-side paging, the response will include a continuation token that can be used to issue
- another Search request for the next page of results.
- :paramtype top: int
- :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the
- query returns captions extracted from key passages in the highest ranked documents. When
- Captions is set to 'extractive', highlighting is enabled by default, and can be configured by
- appending the pipe character '|' followed by the 'highlight-' option, such as
+ :vartype session_id: str
+ :ivar select: The list of fields to retrieve. If unspecified, all fields marked as retrievable
+ in the schema are included.
+ :vartype select: list[str]
+ :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If
+ you need to scan documents in sequence, but cannot use $skip due to this limitation, consider
+ using $orderby on a totally-ordered key and $filter with a range query instead.
+ :vartype skip: int
+ :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip
+ to implement client-side paging of search results. If results are truncated due to server-side
+ paging, the response will include a continuation token that can be used to issue another Search
+ request for the next page of results.
+ :vartype top: int
+ :ivar captions: This parameter is only valid if the query type is 'semantic'. If set, the query
+ returns captions extracted from key passages in the highest ranked documents. When Captions is
+ set to 'extractive', highlighting is enabled by default, and can be configured by appending the
+ pipe character '|' followed by the 'highlight-' option, such as
'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive".
- :paramtype captions: str or ~azure.search.documents.models.Captions
- :keyword semantic_fields: The list of field names used for semantic search.
- :paramtype semantic_fields: list[str]
+ :vartype captions: str or ~azure.search.documents.models.Captions
+ :ivar semantic_fields: The list of field names used for semantic search.
+ :vartype semantic_fields: list[str]
"""
_attribute_map = {
@@ -766,6 +877,100 @@ def __init__(
semantic_fields: Optional[List[str]] = None,
**kwargs
):
+ """
+ :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ results. Default is false. Setting this value to true may have a performance impact. Note that
+ the count returned is an approximation.
+ :paramtype include_total_result_count: bool
+ :keyword facets: The list of facet expressions to apply to the search query. Each facet
+ expression contains a field name, optionally followed by a comma-separated list of name:value
+ pairs.
+ :paramtype facets: list[str]
+ :keyword filter: The OData $filter expression to apply to the search query.
+ :paramtype filter: str
+ :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable
+ fields can be used for hit highlighting.
+ :paramtype highlight_fields: list[str]
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. Default is </em>.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. Default is <em>.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a search query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 100.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ expression can be either a field name or a call to either the geo.distance() or the
+ search.score() functions. Each expression can be followed by asc to indicate ascending, and
+ desc to indicate descending. The default is ascending order. Ties will be broken by the match
+ scores of documents. If no OrderBy is specified, the default sort order is descending by
+ document match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: list[str]
+ :keyword query_type: A value that specifies the syntax of the search query. The default is
+ 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
+ "simple", "full", "semantic".
+ :paramtype query_type: str or ~azure.search.documents.models.QueryType
+ :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ example, referencePointParameter) using the format name-values. For example, if the scoring
+ profile defines a function with a parameter called 'mylocation' the parameter string would be
+ "mylocation--122.2,44.8" (without the quotes).
+ :paramtype scoring_parameters: list[str]
+ :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ documents in order to sort the results.
+ :paramtype scoring_profile: str
+ :keyword search_fields: The list of field names to which to scope the full-text search. When
+ using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of
+ each fielded search expression take precedence over any field names listed in this parameter.
+ :paramtype search_fields: list[str]
+ :keyword query_language: The language of the query. Possible values include: "none", "en-us".
+ :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :keyword speller: Improve search recall by spell-correcting individual search query terms.
+ Possible values include: "none", "lexicon".
+ :paramtype speller: str or ~azure.search.documents.models.Speller
+ :keyword answers: This parameter is only valid if the query type is 'semantic'. If set, the
+ query returns answers extracted from key passages in the highest ranked documents. The number
+ of answers returned can be configured by appending the pipe character '|' followed by the
+ 'count-:code:``' option after the answers parameter value, such as
+ 'extractive|count-3'. Default count is 1. Possible values include: "none", "extractive".
+ :paramtype answers: str or ~azure.search.documents.models.Answers
+ :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ matched in order to count the document as a match. Possible values include: "any", "all".
+ :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
+ :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ statistics (such as document frequency) globally for more consistent scoring, or locally, for
+ lower latency. Possible values include: "local", "global".
+ :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :keyword session_id: A value to be used to create a sticky session, which can help to get more
+ consistent results. As long as the same sessionId is used, a best-effort attempt will be made
+ to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
+ interfere with the load balancing of the requests across replicas and adversely affect the
+ performance of the search service. The value used as sessionId cannot start with a '_'
+ character.
+ :paramtype session_id: str
+ :keyword select: The list of fields to retrieve. If unspecified, all fields marked as
+ retrievable in the schema are included.
+ :paramtype select: list[str]
+ :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
+ If you need to scan documents in sequence, but cannot use $skip due to this limitation,
+ consider using $orderby on a totally-ordered key and $filter with a range query instead.
+ :paramtype skip: int
+ :keyword top: The number of search results to retrieve. This can be used in conjunction with
+ $skip to implement client-side paging of search results. If results are truncated due to
+ server-side paging, the response will include a continuation token that can be used to issue
+ another Search request for the next page of results.
+ :paramtype top: int
+ :keyword captions: This parameter is only valid if the query type is 'semantic'. If set, the
+ query returns captions extracted from key passages in the highest ranked documents. When
+ Captions is set to 'extractive', highlighting is enabled by default, and can be configured by
+ appending the pipe character '|' followed by the 'highlight-' option, such as
+ 'extractive|highlight-true'. Defaults to 'None'. Possible values include: "none", "extractive".
+ :paramtype captions: str or ~azure.search.documents.models.Captions
+ :keyword semantic_fields: The list of field names used for semantic search.
+ :paramtype semantic_fields: list[str]
+ """
super(SearchOptions, self).__init__(**kwargs)
self.include_total_result_count = include_total_result_count
self.facets = facets
@@ -795,99 +1000,98 @@ def __init__(
class SearchRequest(msrest.serialization.Model):
"""Parameters for filtering, sorting, faceting, paging, and other search query behaviors.
- :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ :ivar include_total_result_count: A value that specifies whether to fetch the total count of
results. Default is false. Setting this value to true may have a performance impact. Note that
the count returned is an approximation.
- :paramtype include_total_result_count: bool
- :keyword facets: The list of facet expressions to apply to the search query. Each facet
- expression contains a field name, optionally followed by a comma-separated list of name:value
- pairs.
- :paramtype facets: list[str]
- :keyword filter: The OData $filter expression to apply to the search query.
- :paramtype filter: str
- :keyword highlight_fields: The comma-separated list of field names to use for hit highlights.
- Only searchable fields can be used for hit highlighting.
- :paramtype highlight_fields: str
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :vartype include_total_result_count: bool
+ :ivar facets: The list of facet expressions to apply to the search query. Each facet expression
+ contains a field name, optionally followed by a comma-separated list of name:value pairs.
+ :vartype facets: list[str]
+ :ivar filter: The OData $filter expression to apply to the search query.
+ :vartype filter: str
+ :ivar highlight_fields: The comma-separated list of field names to use for hit highlights. Only
+ searchable fields can be used for hit highlighting.
+ :vartype highlight_fields: str
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. Default is </em>.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. Default is <em>.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a search query in order for the query to be reported as a success. This
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a search query in order for the query to be reported as a success. This
parameter can be useful for ensuring search availability even for services with only one
replica. The default is 100.
- :paramtype minimum_coverage: float
- :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ :vartype minimum_coverage: float
+ :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the
results. Each expression can be either a field name or a call to either the geo.distance() or
the search.score() functions. Each expression can be followed by asc to indicate ascending, or
desc to indicate descending. The default is ascending order. Ties will be broken by the match
scores of documents. If no $orderby is specified, the default sort order is descending by
document match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: str
- :keyword query_type: A value that specifies the syntax of the search query. The default is
+ :vartype order_by: str
+ :ivar query_type: A value that specifies the syntax of the search query. The default is
'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
"simple", "full", "semantic".
- :paramtype query_type: str or ~azure.search.documents.models.QueryType
- :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ :vartype query_type: str or ~azure.search.documents.models.QueryType
+ :ivar scoring_statistics: A value that specifies whether we want to calculate scoring
statistics (such as document frequency) globally for more consistent scoring, or locally, for
lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally
before scoring. Using global scoring statistics can increase latency of search queries.
Possible values include: "local", "global".
- :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
- :keyword session_id: A value to be used to create a sticky session, which can help getting more
+ :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :ivar session_id: A value to be used to create a sticky session, which can help getting more
consistent results. As long as the same sessionId is used, a best-effort attempt will be made
to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
interfere with the load balancing of the requests across replicas and adversely affect the
performance of the search service. The value used as sessionId cannot start with a '_'
character.
- :paramtype session_id: str
- :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ :vartype session_id: str
+ :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for
example, referencePointParameter) using the format name-values. For example, if the scoring
profile defines a function with a parameter called 'mylocation' the parameter string would be
"mylocation--122.2,44.8" (without the quotes).
- :paramtype scoring_parameters: list[str]
- :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ :vartype scoring_parameters: list[str]
+ :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching
documents in order to sort the results.
- :paramtype scoring_profile: str
- :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to
- match all documents.
- :paramtype search_text: str
- :keyword search_fields: The comma-separated list of field names to which to scope the full-text
+ :vartype scoring_profile: str
+ :ivar search_text: A full-text search query expression; Use "*" or omit this parameter to match
+ all documents.
+ :vartype search_text: str
+ :ivar search_fields: The comma-separated list of field names to which to scope the full-text
search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the
field names of each fielded search expression take precedence over any field names listed in
this parameter.
- :paramtype search_fields: str
- :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ :vartype search_fields: str
+ :ivar search_mode: A value that specifies whether any or all of the search terms must be
matched in order to count the document as a match. Possible values include: "any", "all".
- :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
- :keyword query_language: A value that specifies the language of the search query. Possible
- values include: "none", "en-us".
- :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
- :keyword speller: A value that specified the type of the speller to use to spell-correct
+ :vartype search_mode: str or ~azure.search.documents.models.SearchMode
+ :ivar query_language: A value that specifies the language of the search query. Possible values
+ include: "none", "en-us".
+ :vartype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :ivar speller: A value that specified the type of the speller to use to spell-correct
individual search query terms. Possible values include: "none", "lexicon".
- :paramtype speller: str or ~azure.search.documents.models.Speller
- :keyword answers: A value that specifies whether answers should be returned as part of the
- search response. Possible values include: "none", "extractive".
- :paramtype answers: str or ~azure.search.documents.models.Answers
- :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields
- marked as retrievable in the schema are included.
- :paramtype select: str
- :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
- If you need to scan documents in sequence, but cannot use skip due to this limitation, consider
+ :vartype speller: str or ~azure.search.documents.models.QuerySpellerType
+ :ivar answers: A value that specifies whether answers should be returned as part of the search
+ response. Possible values include: "none", "extractive".
+ :vartype answers: str or ~azure.search.documents.models.QueryAnswerType
+ :ivar select: The comma-separated list of fields to retrieve. If unspecified, all fields marked
+ as retrievable in the schema are included.
+ :vartype select: str
+ :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If
+ you need to scan documents in sequence, but cannot use skip due to this limitation, consider
using orderby on a totally-ordered key and filter with a range query instead.
- :paramtype skip: int
- :keyword top: The number of search results to retrieve. This can be used in conjunction with
- $skip to implement client-side paging of search results. If results are truncated due to
- server-side paging, the response will include a continuation token that can be used to issue
- another Search request for the next page of results.
- :paramtype top: int
- :keyword captions: A value that specifies whether captions should be returned as part of the
+ :vartype skip: int
+ :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip
+ to implement client-side paging of search results. If results are truncated due to server-side
+ paging, the response will include a continuation token that can be used to issue another Search
+ request for the next page of results.
+ :vartype top: int
+ :ivar captions: A value that specifies whether captions should be returned as part of the
search response. Possible values include: "none", "extractive".
- :paramtype captions: str or ~azure.search.documents.models.Captions
- :keyword semantic_fields: The comma-separated list of field names used for semantic search.
- :paramtype semantic_fields: str
+ :vartype captions: str or ~azure.search.documents.models.QueryCaptionType
+ :ivar semantic_fields: The comma-separated list of field names used for semantic search.
+ :vartype semantic_fields: str
"""
_attribute_map = {
@@ -937,15 +1141,110 @@ def __init__(
search_fields: Optional[str] = None,
search_mode: Optional[Union[str, "SearchMode"]] = None,
query_language: Optional[Union[str, "QueryLanguage"]] = None,
- speller: Optional[Union[str, "Speller"]] = None,
- answers: Optional[Union[str, "Answers"]] = None,
+ speller: Optional[Union[str, "QuerySpellerType"]] = None,
+ answers: Optional[Union[str, "QueryAnswerType"]] = None,
select: Optional[str] = None,
skip: Optional[int] = None,
top: Optional[int] = None,
- captions: Optional[Union[str, "Captions"]] = None,
+ captions: Optional[Union[str, "QueryCaptionType"]] = None,
semantic_fields: Optional[str] = None,
**kwargs
):
+ """
+ :keyword include_total_result_count: A value that specifies whether to fetch the total count of
+ results. Default is false. Setting this value to true may have a performance impact. Note that
+ the count returned is an approximation.
+ :paramtype include_total_result_count: bool
+ :keyword facets: The list of facet expressions to apply to the search query. Each facet
+ expression contains a field name, optionally followed by a comma-separated list of name:value
+ pairs.
+ :paramtype facets: list[str]
+ :keyword filter: The OData $filter expression to apply to the search query.
+ :paramtype filter: str
+ :keyword highlight_fields: The comma-separated list of field names to use for hit highlights.
+ Only searchable fields can be used for hit highlighting.
+ :paramtype highlight_fields: str
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. Default is </em>.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. Default is <em>.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a search query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 100.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ results. Each expression can be either a field name or a call to either the geo.distance() or
+ the search.score() functions. Each expression can be followed by asc to indicate ascending, or
+ desc to indicate descending. The default is ascending order. Ties will be broken by the match
+ scores of documents. If no $orderby is specified, the default sort order is descending by
+ document match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: str
+ :keyword query_type: A value that specifies the syntax of the search query. The default is
+ 'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
+ "simple", "full", "semantic".
+ :paramtype query_type: str or ~azure.search.documents.models.QueryType
+ :keyword scoring_statistics: A value that specifies whether we want to calculate scoring
+ statistics (such as document frequency) globally for more consistent scoring, or locally, for
+ lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally
+ before scoring. Using global scoring statistics can increase latency of search queries.
+ Possible values include: "local", "global".
+ :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics
+ :keyword session_id: A value to be used to create a sticky session, which can help getting more
+ consistent results. As long as the same sessionId is used, a best-effort attempt will be made
+ to target the same replica set. Be wary that reusing the same sessionID values repeatedly can
+ interfere with the load balancing of the requests across replicas and adversely affect the
+ performance of the search service. The value used as sessionId cannot start with a '_'
+ character.
+ :paramtype session_id: str
+ :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for
+ example, referencePointParameter) using the format name-values. For example, if the scoring
+ profile defines a function with a parameter called 'mylocation' the parameter string would be
+ "mylocation--122.2,44.8" (without the quotes).
+ :paramtype scoring_parameters: list[str]
+ :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching
+ documents in order to sort the results.
+ :paramtype scoring_profile: str
+ :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to
+ match all documents.
+ :paramtype search_text: str
+ :keyword search_fields: The comma-separated list of field names to which to scope the full-text
+ search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the
+ field names of each fielded search expression take precedence over any field names listed in
+ this parameter.
+ :paramtype search_fields: str
+ :keyword search_mode: A value that specifies whether any or all of the search terms must be
+ matched in order to count the document as a match. Possible values include: "any", "all".
+ :paramtype search_mode: str or ~azure.search.documents.models.SearchMode
+ :keyword query_language: A value that specifies the language of the search query. Possible
+ values include: "none", "en-us".
+ :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :keyword speller: A value that specified the type of the speller to use to spell-correct
+ individual search query terms. Possible values include: "none", "lexicon".
+ :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType
+ :keyword answers: A value that specifies whether answers should be returned as part of the
+ search response. Possible values include: "none", "extractive".
+ :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType
+ :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields
+ marked as retrievable in the schema are included.
+ :paramtype select: str
+ :keyword skip: The number of search results to skip. This value cannot be greater than 100,000.
+ If you need to scan documents in sequence, but cannot use skip due to this limitation, consider
+ using orderby on a totally-ordered key and filter with a range query instead.
+ :paramtype skip: int
+ :keyword top: The number of search results to retrieve. This can be used in conjunction with
+ $skip to implement client-side paging of search results. If results are truncated due to
+ server-side paging, the response will include a continuation token that can be used to issue
+ another Search request for the next page of results.
+ :paramtype top: int
+ :keyword captions: A value that specifies whether captions should be returned as part of the
+ search response. Possible values include: "none", "extractive".
+ :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType
+ :keyword semantic_fields: The comma-separated list of field names used for semantic search.
+ :paramtype semantic_fields: str
+ """
super(SearchRequest, self).__init__(**kwargs)
self.include_total_result_count = include_total_result_count
self.facets = facets
@@ -980,9 +1279,9 @@ class SearchResult(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar score: Required. The relevance score of the document compared to other documents returned
by the query.
:vartype score: float
@@ -1020,6 +1319,11 @@ def __init__(
additional_properties: Optional[Dict[str, Any]] = None,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(SearchResult, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.score = None
@@ -1056,6 +1360,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SuggestDocumentsResult, self).__init__(**kwargs)
self.results = None
self.coverage = None
@@ -1064,41 +1370,41 @@ def __init__(
class SuggestOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword filter: An OData expression that filters the documents considered for suggestions.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- suggestions query. Default is false. When set to true, the query will find terms even if
- there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :ivar filter: An OData expression that filters the documents considered for suggestions.
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions
+ query. Default is false. When set to true, the query will find terms even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a suggestions query in order for the query to be reported as a success.
- This parameter can be useful for ensuring search availability even for services with only one
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a suggestions query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ :vartype minimum_coverage: float
+ :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each
expression can be either a field name or a call to either the geo.distance() or the
search.score() functions. Each expression can be followed by asc to indicate ascending, or desc
to indicate descending. The default is ascending order. Ties will be broken by the match scores
of documents. If no $orderby is specified, the default sort order is descending by document
match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: list[str]
- :keyword search_fields: The list of field names to search for the specified search text. Target
+ :vartype order_by: list[str]
+ :ivar search_fields: The list of field names to search for the specified search text. Target
fields must be included in the specified suggester.
- :paramtype search_fields: list[str]
- :keyword select: The list of fields to retrieve. If unspecified, only the key field will be
+ :vartype search_fields: list[str]
+ :ivar select: The list of fields to retrieve. If unspecified, only the key field will be
included in the results.
- :paramtype select: list[str]
- :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and
- 100. The default is 5.
- :paramtype top: int
+ :vartype select: list[str]
+ :ivar top: The number of suggestions to retrieve. The value must be a number between 1 and 100.
+ The default is 5.
+ :vartype top: int
"""
_attribute_map = {
@@ -1127,6 +1433,43 @@ def __init__(
top: Optional[int] = None,
**kwargs
):
+ """
+ :keyword filter: An OData expression that filters the documents considered for suggestions.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ suggestions query. Default is false. When set to true, the query will find terms even if
+ there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a suggestions query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ expression can be either a field name or a call to either the geo.distance() or the
+ search.score() functions. Each expression can be followed by asc to indicate ascending, or desc
+ to indicate descending. The default is ascending order. Ties will be broken by the match scores
+ of documents. If no $orderby is specified, the default sort order is descending by document
+ match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: list[str]
+ :keyword search_fields: The list of field names to search for the specified search text. Target
+ fields must be included in the specified suggester.
+ :paramtype search_fields: list[str]
+ :keyword select: The list of fields to retrieve. If unspecified, only the key field will be
+ included in the results.
+ :paramtype select: list[str]
+ :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and
+ 100. The default is 5.
+ :paramtype top: int
+ """
super(SuggestOptions, self).__init__(**kwargs)
self.filter = filter
self.use_fuzzy_matching = use_fuzzy_matching
@@ -1144,47 +1487,47 @@ class SuggestRequest(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword filter: An OData expression that filters the documents considered for suggestions.
- :paramtype filter: str
- :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
- suggestion query. Default is false. When set to true, the query will find suggestions even if
- there's a substituted or missing character in the search text. While this provides a better
- experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are
- slower and consume more resources.
- :paramtype use_fuzzy_matching: bool
- :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ :ivar filter: An OData expression that filters the documents considered for suggestions.
+ :vartype filter: str
+ :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestion
+ query. Default is false. When set to true, the query will find suggestions even if there's a
+ substituted or missing character in the search text. While this provides a better experience in
+ some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and
+ consume more resources.
+ :vartype use_fuzzy_matching: bool
+ :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_post_tag: str
- :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ :vartype highlight_post_tag: str
+ :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
- :paramtype highlight_pre_tag: str
- :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
- that must be covered by a suggestion query in order for the query to be reported as a success.
- This parameter can be useful for ensuring search availability even for services with only one
+ :vartype highlight_pre_tag: str
+ :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
+ must be covered by a suggestion query in order for the query to be reported as a success. This
+ parameter can be useful for ensuring search availability even for services with only one
replica. The default is 80.
- :paramtype minimum_coverage: float
- :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ :vartype minimum_coverage: float
+ :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the
results. Each expression can be either a field name or a call to either the geo.distance() or
the search.score() functions. Each expression can be followed by asc to indicate ascending, or
desc to indicate descending. The default is ascending order. Ties will be broken by the match
scores of documents. If no $orderby is specified, the default sort order is descending by
document match score. There can be at most 32 $orderby clauses.
- :paramtype order_by: str
- :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1
+ :vartype order_by: str
+ :ivar search_text: Required. The search text to use to suggest documents. Must be at least 1
character, and no more than 100 characters.
- :paramtype search_text: str
- :keyword search_fields: The comma-separated list of field names to search for the specified
- search text. Target fields must be included in the specified suggester.
- :paramtype search_fields: str
- :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key
+ :vartype search_text: str
+ :ivar search_fields: The comma-separated list of field names to search for the specified search
+ text. Target fields must be included in the specified suggester.
+ :vartype search_fields: str
+ :ivar select: The comma-separated list of fields to retrieve. If unspecified, only the key
field will be included in the results.
- :paramtype select: str
- :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ :vartype select: str
+ :ivar suggester_name: Required. The name of the suggester as specified in the suggesters
collection that's part of the index definition.
- :paramtype suggester_name: str
- :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100.
- The default is 5.
- :paramtype top: int
+ :vartype suggester_name: str
+ :ivar top: The number of suggestions to retrieve. This must be a value between 1 and 100. The
+ default is 5.
+ :vartype top: int
"""
_validation = {
@@ -1222,6 +1565,49 @@ def __init__(
top: Optional[int] = None,
**kwargs
):
+ """
+ :keyword filter: An OData expression that filters the documents considered for suggestions.
+ :paramtype filter: str
+ :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
+ suggestion query. Default is false. When set to true, the query will find suggestions even if
+ there's a substituted or missing character in the search text. While this provides a better
+ experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are
+ slower and consume more resources.
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be covered by a suggestion query in order for the query to be reported as a success.
+ This parameter can be useful for ensuring search availability even for services with only one
+ replica. The default is 80.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the
+ results. Each expression can be either a field name or a call to either the geo.distance() or
+ the search.score() functions. Each expression can be followed by asc to indicate ascending, or
+ desc to indicate descending. The default is ascending order. Ties will be broken by the match
+ scores of documents. If no $orderby is specified, the default sort order is descending by
+ document match score. There can be at most 32 $orderby clauses.
+ :paramtype order_by: str
+ :keyword search_text: Required. The search text to use to suggest documents. Must be at least 1
+ character, and no more than 100 characters.
+ :paramtype search_text: str
+ :keyword search_fields: The comma-separated list of field names to search for the specified
+ search text. Target fields must be included in the specified suggester.
+ :paramtype search_fields: str
+ :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key
+ field will be included in the results.
+ :paramtype select: str
+ :keyword suggester_name: Required. The name of the suggester as specified in the suggesters
+ collection that's part of the index definition.
+ :paramtype suggester_name: str
+ :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100.
+ The default is 5.
+ :paramtype top: int
+ """
super(SuggestRequest, self).__init__(**kwargs)
self.filter = filter
self.use_fuzzy_matching = use_fuzzy_matching
@@ -1243,9 +1629,9 @@ class SuggestResult(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
+ :vartype additional_properties: dict[str, any]
:ivar text: Required. The text of the suggestion result.
:vartype text: str
"""
@@ -1265,6 +1651,11 @@ def __init__(
additional_properties: Optional[Dict[str, Any]] = None,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ """
super(SuggestResult, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.text = None
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py
index 29a5bdc2c30d..5a07c4944d99 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_client_enums.py
@@ -12,12 +12,6 @@
class Answers(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
- """This parameter is only valid if the query type is 'semantic'. If set, the query returns answers
- extracted from key passages in the highest ranked documents. The number of answers returned can
- be configured by appending the pipe character '|' followed by the 'count-:code:``' option after the answers parameter value, such as 'extractive|count-3'. Default
- count is 1.
- """
#: Do not return answers for the query.
NONE = "none"
@@ -43,12 +37,6 @@ class AutocompleteMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ONE_TERM_WITH_CONTEXT = "oneTermWithContext"
class Captions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
- """This parameter is only valid if the query type is 'semantic'. If set, the query returns
- captions extracted from key passages in the highest ranked documents. When Captions is set to
- 'extractive', highlighting is enabled by default, and can be configured by appending the pipe
- character '|' followed by the 'highlight-' option, such as
- 'extractive|highlight-true'. Defaults to 'None'.
- """
#: Do not return captions for the query.
NONE = "none"
@@ -75,6 +63,34 @@ class IndexActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: document, use merge instead and set the field explicitly to null.
DELETE = "delete"
+class QueryAnswerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
+ """This parameter is only valid if the query type is 'semantic'. If set, the query returns answers
+ extracted from key passages in the highest ranked documents. The number of answers returned can
+ be configured by appending the pipe character '|' followed by the 'count-:code:``' option after the answers parameter value, such as 'extractive|count-3'. Default
+ count is 1.
+ """
+
+ #: Do not return answers for the query.
+ NONE = "none"
+ #: Extracts answer candidates from the contents of the documents returned in response to a query
+ #: expressed as a question in natural language.
+ EXTRACTIVE = "extractive"
+
+class QueryCaptionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
+ """This parameter is only valid if the query type is 'semantic'. If set, the query returns
+ captions extracted from key passages in the highest ranked documents. When Captions is set to
+ 'extractive', highlighting is enabled by default, and can be configured by appending the pipe
+ character '|' followed by the 'highlight-' option, such as
+ 'extractive|highlight-true'. Defaults to 'None'.
+ """
+
+ #: Do not return captions for the query.
+ NONE = "none"
+ #: Extracts captions from the matching documents that contain passages relevant to the search
+ #: query.
+ EXTRACTIVE = "extractive"
+
class QueryLanguage(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The language of the query.
"""
@@ -84,6 +100,16 @@ class QueryLanguage(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: English.
EN_US = "en-us"
+class QuerySpellerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
+ """Improve search recall by spell-correcting individual search query terms.
+ """
+
+ #: Speller not enabled.
+ NONE = "none"
+ #: Speller corrects individual query terms using a static lexicon for the language specified by
+ #: the queryLanguage parameter.
+ LEXICON = "lexicon"
+
class QueryType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query
uses the Lucene query syntax and 'semantic' if query syntax is not needed.
@@ -125,8 +151,6 @@ class SearchMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ALL = "all"
class Speller(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
- """Improve search recall by spell-correcting individual search query terms.
- """
#: Speller not enabled.
NONE = "none"
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py
index 39c09ccee60a..be94d149dd8e 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py
@@ -12,12 +12,12 @@
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
-from azure.core.pipeline.transport._base import _format_url_section
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
+from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -502,19 +502,20 @@ def count(
request = build_count_request(
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.count.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('long', pipeline_response)
@@ -634,19 +635,20 @@ def search_get(
semantic_fields=_semantic_fields,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.search_get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
@@ -696,19 +698,20 @@ def search_post(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.search_post.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)
@@ -759,19 +762,20 @@ def get(
selected_fields=selected_fields,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
@@ -854,19 +858,20 @@ def suggest_get(
top=_top,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.suggest_get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
@@ -916,19 +921,20 @@ def suggest_post(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.suggest_post.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)
@@ -979,19 +985,20 @@ def index(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.index.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 207]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -1074,19 +1081,20 @@ def autocomplete_get(
search_fields=_search_fields,
top=_top,
template_url=self.autocomplete_get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AutocompleteResult', pipeline_response)
@@ -1136,19 +1144,20 @@ def autocomplete_post(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.autocomplete_post.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
"indexName": self._serialize.url("self._config.index_name", self._config.index_name, 'str'),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AutocompleteResult', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py
index 68a1e180bb42..79fce398aad8 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py
@@ -132,7 +132,7 @@ def get_document(self, key, selected_fields=None, **kwargs):
:param key: The primary key value for the document to retrieve
:type key: str
- :param selected_fields: a whitelist of fields to include in the results
+        :param selected_fields: an allowlist of fields to include in the results
:type selected_fields: List[str]
:rtype: dict
@@ -202,17 +202,17 @@ def search(self, search_text, **kwargs): # pylint:disable=too-many-locals
:paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
:keyword query_speller: A value that specified the type of the speller to use to spell-correct
individual search query terms. Possible values include: "none", "lexicon".
- :paramtype query_speller: str or ~azure.search.documents.models.Speller
+ :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType
:keyword query_answer: This parameter is only valid if the query type is 'semantic'. If set,
the query returns answers extracted from key passages in the highest ranked documents.
Possible values include: "none", "extractive".
- :paramtype query_answer: str or ~azure.search.documents.models.Answers
+ :paramtype query_answer: str or ~azure.search.documents.models.QueryAnswerType
:keyword int query_answer_count: This parameter is only valid if the query type is 'semantic' and
query answer is 'extractive'. Configures the number of answers returned. Default count is 1.
:keyword query_caption: This parameter is only valid if the query type is 'semantic'. If set, the
query returns captions extracted from key passages in the highest ranked documents.
Defaults to 'None'. Possible values include: "none", "extractive".
- :paramtype query_caption: str or ~azure.search.documents.models.Captions
+ :paramtype query_caption: str or ~azure.search.documents.models.QueryCaptionType
:keyword bool query_caption_highlight: This parameter is only valid if the query type is 'semantic' when
query caption is set to 'extractive'. Determines whether highlighting is enabled.
Defaults to 'true'.
diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py
index e7f66ca59ae0..65a51b4610da 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py
@@ -109,7 +109,7 @@ async def get_document(self, key, selected_fields=None, **kwargs):
:param key: The primary key value for the document to retrieve
:type key: str
- :param selected_fields: a whitelist of fields to include in the results
+        :param selected_fields: an allowlist of fields to include in the results
:type selected_fields: List[str]
:rtype: dict
@@ -180,18 +180,18 @@ async def search(self, search_text, **kwargs): # pylint:disable=too-many-locals
:paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
:keyword query_speller: A value that specified the type of the speller to use to spell-correct
individual search query terms. Possible values include: "none", "lexicon".
- :paramtype query_speller: str or ~azure.search.documents.models.Speller
+ :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType
:keyword query_answer: This parameter is only valid if the query type is 'semantic'. If set,
the query returns answers extracted from key passages in the highest ranked documents.
Possible values include: "none", "extractive".
- :paramtype query_answer: str or ~azure.search.documents.models.Answers
+ :paramtype query_answer: str or ~azure.search.documents.models.QueryAnswerType
:keyword int query_answer_count: This parameter is only valid if the query type is 'semantic' and
query answer is 'extractive'.
Configures the number of answers returned. Default count is 1.
:keyword query_caption: This parameter is only valid if the query type is 'semantic'. If set, the
query returns captions extracted from key passages in the highest ranked documents.
Defaults to 'None'. Possible values include: "none", "extractive".
- :paramtype query_caption: str or ~azure.search.documents.models.Captions
+ :paramtype query_caption: str or ~azure.search.documents.models.QueryCaptionType
:keyword bool query_caption_highlight: This parameter is only valid if the query type is 'semantic' when
query caption is set to 'extractive'. Determines whether highlighting is enabled.
Defaults to 'true'.
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py
new file mode 100644
index 000000000000..138f663c53a4
--- /dev/null
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py
@@ -0,0 +1,27 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.pipeline.transport import HttpRequest
+
+def _convert_request(request, files=None):
+ data = request.content if not files else None
+ request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data)
+ if files:
+ request.set_formdata_body(files)
+ return request
+
+def _format_url_section(template, **kwargs):
+ components = template.split("/")
+ while components:
+ try:
+ return template.format(**kwargs)
+ except KeyError as key:
+ formatted_components = template.split("/")
+ components = [
+ c for c in formatted_components if "{}".format(key.args[0]) not in c
+ ]
+ template = "/".join(components)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py
index 8f4a05b35c10..b2f776ae8c12 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py
@@ -16,6 +16,7 @@
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
+from ..._vendor import _convert_request
from ...operations._data_sources_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')
@@ -50,7 +51,7 @@ async def create_or_update(
data_source: "_models.SearchIndexerDataSource",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
- ignore_reset_requirements: Optional[bool] = None,
+ skip_indexer_reset_requirement_for_cache: Optional[bool] = None,
request_options: Optional["_models.RequestOptions"] = None,
**kwargs: Any
) -> "_models.SearchIndexerDataSource":
@@ -66,8 +67,8 @@ async def create_or_update(
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
- :param ignore_reset_requirements: Ignores cache reset requirements.
- :type ignore_reset_requirements: bool
+ :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :type skip_indexer_reset_requirement_for_cache: bool
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -94,21 +95,22 @@ async def create_or_update(
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
- ignore_reset_requirements=ignore_reset_requirements,
+ skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -167,18 +169,19 @@ async def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -219,18 +222,19 @@ async def get(
data_source_name=data_source_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
@@ -277,18 +281,19 @@ async def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListDataSourcesResult', pipeline_response)
@@ -337,18 +342,19 @@ async def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py
index 31bdc5b09925..fd8df28bdb71 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py
@@ -16,6 +16,7 @@
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
+from ..._vendor import _convert_request
from ...operations._indexers_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_get_status_request, build_list_request, build_reset_docs_request, build_reset_request, build_run_request
T = TypeVar('T')
@@ -75,18 +76,19 @@ async def reset(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.reset.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -100,7 +102,7 @@ async def reset_docs(
self,
indexer_name: str,
overwrite: Optional[bool] = False,
- keys_or_ids: Optional["_models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema"] = None,
+ keys_or_ids: Optional["_models.DocumentKeysOrIds"] = None,
request_options: Optional["_models.RequestOptions"] = None,
**kwargs: Any
) -> None:
@@ -112,8 +114,7 @@ async def reset_docs(
keys or ids in this payload will be queued to be re-ingested.
:type overwrite: bool
:param keys_or_ids:
- :type keys_or_ids:
- ~azure.search.documents.indexes.models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema
+ :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -133,7 +134,7 @@ async def reset_docs(
if request_options is not None:
_x_ms_client_request_id = request_options.x_ms_client_request_id
if keys_or_ids is not None:
- json = self._serialize.body(keys_or_ids, 'Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema')
+ json = self._serialize.body(keys_or_ids, 'DocumentKeysOrIds')
else:
json = None
@@ -144,18 +145,19 @@ async def reset_docs(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.reset_docs.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -196,18 +198,19 @@ async def run(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.run.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -223,8 +226,8 @@ async def create_or_update(
indexer: "_models.SearchIndexer",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
+ skip_indexer_reset_requirement_for_cache: Optional[bool] = None,
disable_cache_reprocessing_change_detection: Optional[bool] = None,
- ignore_reset_requirements: Optional[bool] = None,
request_options: Optional["_models.RequestOptions"] = None,
**kwargs: Any
) -> "_models.SearchIndexer":
@@ -240,11 +243,11 @@ async def create_or_update(
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
+ :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :type skip_indexer_reset_requirement_for_cache: bool
:param disable_cache_reprocessing_change_detection: Disables cache reprocessing change
detection.
:type disable_cache_reprocessing_change_detection: bool
- :param ignore_reset_requirements: Ignores cache reset requirements.
- :type ignore_reset_requirements: bool
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -271,22 +274,23 @@ async def create_or_update(
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
+ skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache,
disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection,
- ignore_reset_requirements=ignore_reset_requirements,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -345,18 +349,19 @@ async def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -397,18 +402,19 @@ async def get(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexer', pipeline_response)
@@ -455,18 +461,19 @@ async def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListIndexersResult', pipeline_response)
@@ -515,18 +522,19 @@ async def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexer', pipeline_response)
@@ -571,18 +579,19 @@ async def get_status(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get_status.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerStatus', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py
index b392a4a7e928..eb788bd34818 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py
@@ -18,6 +18,7 @@
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
+from ..._vendor import _convert_request
from ...operations._indexes_operations import build_analyze_request, build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_get_statistics_request, build_list_request
T = TypeVar('T')
@@ -81,18 +82,19 @@ async def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndex', pipeline_response)
@@ -141,7 +143,8 @@ def prepare_request(next_link=None):
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
@@ -156,7 +159,8 @@ def prepare_request(next_link=None):
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=next_link,
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
@@ -183,7 +187,7 @@ async def get_next(next_link=None):
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
@@ -252,18 +256,19 @@ async def create_or_update(
if_none_match=if_none_match,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -324,18 +329,19 @@ async def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -376,18 +382,19 @@ async def get(
index_name=index_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndex', pipeline_response)
@@ -432,18 +439,19 @@ async def get_statistics(
index_name=index_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get_statistics.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('GetIndexStatisticsResult', pipeline_response)
@@ -496,18 +504,19 @@ async def analyze(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.analyze.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AnalyzeResult', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py
index 259e7f045d3c..1ae2ee9e4c00 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_client_operations.py
@@ -16,6 +16,7 @@
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
+from ..._vendor import _convert_request
from ...operations._search_client_operations import build_get_service_statistics_request
T = TypeVar('T')
@@ -51,18 +52,19 @@ async def get_service_statistics(
request = build_get_service_statistics_request(
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get_service_statistics.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ServiceStatistics', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py
index 23db88c2f37a..222b83a7461d 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py
@@ -6,7 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
-from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
+from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
@@ -16,7 +16,8 @@
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
-from ...operations._skillsets_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request
+from ..._vendor import _convert_request
+from ...operations._skillsets_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request, build_reset_skills_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -50,8 +51,8 @@ async def create_or_update(
skillset: "_models.SearchIndexerSkillset",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
+ skip_indexer_reset_requirement_for_cache: Optional[bool] = None,
disable_cache_reprocessing_change_detection: Optional[bool] = None,
- ignore_reset_requirements: Optional[bool] = None,
request_options: Optional["_models.RequestOptions"] = None,
**kwargs: Any
) -> "_models.SearchIndexerSkillset":
@@ -68,11 +69,11 @@ async def create_or_update(
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
+ :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :type skip_indexer_reset_requirement_for_cache: bool
:param disable_cache_reprocessing_change_detection: Disables cache reprocessing change
detection.
:type disable_cache_reprocessing_change_detection: bool
- :param ignore_reset_requirements: Ignores cache reset requirements.
- :type ignore_reset_requirements: bool
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -99,22 +100,23 @@ async def create_or_update(
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
+ skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache,
disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection,
- ignore_reset_requirements=ignore_reset_requirements,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -173,18 +175,19 @@ async def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -225,18 +228,19 @@ async def get(
skillset_name=skillset_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response)
@@ -283,18 +287,19 @@ async def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListSkillsetsResult', pipeline_response)
@@ -343,18 +348,19 @@ async def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response)
@@ -366,3 +372,65 @@ async def create(
create.metadata = {'url': '/skillsets'} # type: ignore
+
+ @distributed_trace_async
+ async def reset_skills(
+ self,
+ skillset_name: str,
+ skill_names: Optional[List[str]] = None,
+ request_options: Optional["_models.RequestOptions"] = None,
+ **kwargs: Any
+ ) -> None:
+ """Reset an existing skillset in a search service.
+
+ :param skillset_name: The name of the skillset to reset.
+ :type skillset_name: str
+ :param skill_names: the names of skills to be reset.
+ :type skill_names: list[str]
+ :param request_options: Parameter group.
+ :type request_options: ~azure.search.documents.indexes.models.RequestOptions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
+
+ _x_ms_client_request_id = None
+ if request_options is not None:
+ _x_ms_client_request_id = request_options.x_ms_client_request_id
+ _skill_names = _models.SkillNames(skill_names=skill_names)
+ json = self._serialize.body(_skill_names, 'SkillNames')
+
+ request = build_reset_skills_request(
+ skillset_name=skillset_name,
+ content_type=content_type,
+ x_ms_client_request_id=_x_ms_client_request_id,
+ json=json,
+ template_url=self.reset_skills.metadata['url'],
+ )
+ request = _convert_request(request)
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
+ }
+ request.url = self._client.format_url(request.url, **path_format_arguments)
+
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
+ raise HttpResponseError(response=response, model=error)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ reset_skills.metadata = {'url': '/skillsets(\'{skillsetName}\')/search.resetskills'} # type: ignore
+
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py
index 78036d8873d1..de86d3288d0e 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py
@@ -16,6 +16,7 @@
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
+from ..._vendor import _convert_request
from ...operations._synonym_maps_operations import build_create_or_update_request, build_create_request, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')
@@ -93,18 +94,19 @@ async def create_or_update(
if_none_match=if_none_match,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -163,18 +165,19 @@ async def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -215,18 +218,19 @@ async def get(
synonym_map_name=synonym_map_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynonymMap', pipeline_response)
@@ -273,18 +277,19 @@ async def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListSynonymMapsResult', pipeline_response)
@@ -333,18 +338,19 @@ async def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = await self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynonymMap', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py
index c358e0dbef5a..cc1a6e75a0d6 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py
@@ -35,6 +35,7 @@
from ._models_py3 import DistanceScoringFunction
from ._models_py3 import DistanceScoringParameters
from ._models_py3 import DocumentExtractionSkill
+ from ._models_py3 import DocumentKeysOrIds
from ._models_py3 import EdgeNGramTokenFilter
from ._models_py3 import EdgeNGramTokenFilterV2
from ._models_py3 import EdgeNGramTokenizer
@@ -87,7 +88,6 @@
from ._models_py3 import OutputFieldMappingEntry
from ._models_py3 import PIIDetectionSkill
from ._models_py3 import PathHierarchyTokenizerV2
- from ._models_py3 import Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema
from ._models_py3 import PatternAnalyzer
from ._models_py3 import PatternCaptureTokenFilter
from ._models_py3 import PatternReplaceCharFilter
@@ -130,6 +130,7 @@
from ._models_py3 import ShaperSkill
from ._models_py3 import ShingleTokenFilter
from ._models_py3 import Similarity
+ from ._models_py3 import SkillNames
from ._models_py3 import SnowballTokenFilter
from ._models_py3 import SoftDeleteColumnDeletionDetectionPolicy
from ._models_py3 import SplitSkill
@@ -180,6 +181,7 @@
from ._models import DistanceScoringFunction # type: ignore
from ._models import DistanceScoringParameters # type: ignore
from ._models import DocumentExtractionSkill # type: ignore
+ from ._models import DocumentKeysOrIds # type: ignore
from ._models import EdgeNGramTokenFilter # type: ignore
from ._models import EdgeNGramTokenFilterV2 # type: ignore
from ._models import EdgeNGramTokenizer # type: ignore
@@ -232,7 +234,6 @@
from ._models import OutputFieldMappingEntry # type: ignore
from ._models import PIIDetectionSkill # type: ignore
from ._models import PathHierarchyTokenizerV2 # type: ignore
- from ._models import Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema # type: ignore
from ._models import PatternAnalyzer # type: ignore
from ._models import PatternCaptureTokenFilter # type: ignore
from ._models import PatternReplaceCharFilter # type: ignore
@@ -275,6 +276,7 @@
from ._models import ShaperSkill # type: ignore
from ._models import ShingleTokenFilter # type: ignore
from ._models import Similarity # type: ignore
+ from ._models import SkillNames # type: ignore
from ._models import SnowballTokenFilter # type: ignore
from ._models import SoftDeleteColumnDeletionDetectionPolicy # type: ignore
from ._models import SplitSkill # type: ignore
@@ -371,6 +373,7 @@
'DistanceScoringFunction',
'DistanceScoringParameters',
'DocumentExtractionSkill',
+ 'DocumentKeysOrIds',
'EdgeNGramTokenFilter',
'EdgeNGramTokenFilterV2',
'EdgeNGramTokenizer',
@@ -423,7 +426,6 @@
'OutputFieldMappingEntry',
'PIIDetectionSkill',
'PathHierarchyTokenizerV2',
- 'Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema',
'PatternAnalyzer',
'PatternCaptureTokenFilter',
'PatternReplaceCharFilter',
@@ -466,6 +468,7 @@
'ShaperSkill',
'ShingleTokenFilter',
'Similarity',
+ 'SkillNames',
'SnowballTokenFilter',
'SoftDeleteColumnDeletionDetectionPolicy',
'SplitSkill',
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py
index a4a0654cb0cc..efb7f70d0e7d 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models.py
@@ -48,6 +48,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(AnalyzedTokenInfo, self).__init__(**kwargs)
self.token = None
self.start_offset = None
@@ -60,9 +62,9 @@ class AnalyzeRequest(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword text: Required. The text to break into tokens.
- :paramtype text: str
- :keyword analyzer: The name of the analyzer to use to break the given text. Possible values
+ :ivar text: Required. The text to break into tokens.
+ :vartype text: str
+ :ivar analyzer: The name of the analyzer to use to break the given text. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
"zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
@@ -80,20 +82,19 @@ class AnalyzeRequest(msrest.serialization.Model):
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
- :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values
+ :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar tokenizer: The name of the tokenizer to use to break the given text. Possible values
include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase",
"microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram",
"path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
- :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
- :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible
+ :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :ivar normalizer: The name of the normalizer to use to normalize the given text. Possible
values include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
- :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
- :keyword token_filters: An optional list of token filters to use when breaking the given text.
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
- :keyword char_filters: An optional list of character filters to use when breaking the given
- text.
- :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :ivar token_filters: An optional list of token filters to use when breaking the given text.
+ :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :ivar char_filters: An optional list of character filters to use when breaking the given text.
+ :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
@@ -113,6 +114,42 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword text: Required. The text to break into tokens.
+ :paramtype text: str
+ :keyword analyzer: The name of the analyzer to use to break the given text. Possible values
+ include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
+ "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
+ "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
+ "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
+ "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
+ "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
+ "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
+ "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
+ "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
+ "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
+ "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
+ "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
+ "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
+ "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
+ "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
+ "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
+ "whitespace".
+ :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values
+ include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase",
+ "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram",
+ "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
+ :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible
+ values include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
+ :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :keyword token_filters: An optional list of token filters to use when breaking the given text.
+ :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :keyword char_filters: An optional list of character filters to use when breaking the given
+ text.
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ """
super(AnalyzeRequest, self).__init__(**kwargs)
self.text = kwargs['text']
self.analyzer = kwargs.get('analyzer', None)
@@ -127,9 +164,8 @@ class AnalyzeResult(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword tokens: Required. The list of tokens returned by the analyzer specified in the
- request.
- :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
+ :ivar tokens: Required. The list of tokens returned by the analyzer specified in the request.
+ :vartype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
"""
_validation = {
@@ -144,6 +180,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword tokens: Required. The list of tokens returned by the analyzer specified in the
+ request.
+ :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
+ """
super(AnalyzeResult, self).__init__(**kwargs)
self.tokens = kwargs['tokens']
@@ -156,13 +197,13 @@ class TokenFilter(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
+ :vartype name: str
"""
_validation = {
@@ -183,6 +224,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(TokenFilter, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
@@ -193,16 +240,16 @@ class AsciiFoldingTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword preserve_original: A value indicating whether the original token will be kept. Default
- is false.
- :paramtype preserve_original: bool
+ :vartype name: str
+ :ivar preserve_original: A value indicating whether the original token will be kept. Default is
+ false.
+ :vartype preserve_original: bool
"""
_validation = {
@@ -220,6 +267,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword preserve_original: A value indicating whether the original token will be kept. Default
+ is false.
+ :paramtype preserve_original: bool
+ """
super(AsciiFoldingTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' # type: str
self.preserve_original = kwargs.get('preserve_original', False)
@@ -230,12 +286,12 @@ class AzureActiveDirectoryApplicationCredentials(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword application_id: Required. An AAD Application ID that was granted the required access
+ :ivar application_id: Required. An AAD Application ID that was granted the required access
permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
Application ID should not be confused with the Object ID for your AAD Application.
- :paramtype application_id: str
- :keyword application_secret: The authentication key of the specified AAD application.
- :paramtype application_secret: str
+ :vartype application_id: str
+ :ivar application_secret: The authentication key of the specified AAD application.
+ :vartype application_secret: str
"""
_validation = {
@@ -251,6 +307,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword application_id: Required. An AAD Application ID that was granted the required access
+ permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
+ Application ID should not be confused with the Object ID for your AAD Application.
+ :paramtype application_id: str
+ :keyword application_secret: The authentication key of the specified AAD application.
+ :paramtype application_secret: str
+ """
super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs)
self.application_id = kwargs['application_id']
self.application_secret = kwargs.get('application_secret', None)
@@ -264,8 +328,8 @@ class Similarity(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Constant filled by server.
- :paramtype odata_type: str
+ :ivar odata_type: Required. Constant filled by server.
+ :vartype odata_type: str
"""
_validation = {
@@ -284,6 +348,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(Similarity, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -293,16 +359,16 @@ class BM25Similarity(Similarity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Constant filled by server.
- :paramtype odata_type: str
- :keyword k1: This property controls the scaling function between the term frequency of each
+ :ivar odata_type: Required. Constant filled by server.
+ :vartype odata_type: str
+ :ivar k1: This property controls the scaling function between the term frequency of each
matching terms and the final relevance score of a document-query pair. By default, a value of
1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.
- :paramtype k1: float
- :keyword b: This property controls how the length of a document affects the relevance score. By
+ :vartype k1: float
+ :ivar b: This property controls how the length of a document affects the relevance score. By
default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied,
while a value of 1.0 means the score is fully normalized by the length of the document.
- :paramtype b: float
+ :vartype b: float
"""
_validation = {
@@ -319,6 +385,16 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword k1: This property controls the scaling function between the term frequency of each
+ matching terms and the final relevance score of a document-query pair. By default, a value of
+ 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.
+ :paramtype k1: float
+ :keyword b: This property controls how the length of a document affects the relevance score. By
+ default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied,
+ while a value of 1.0 means the score is fully normalized by the length of the document.
+ :paramtype b: float
+ """
super(BM25Similarity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' # type: str
self.k1 = kwargs.get('k1', None)
@@ -333,13 +409,13 @@ class CharFilter(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
+ :vartype name: str
"""
_validation = {
@@ -360,6 +436,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(CharFilter, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
@@ -370,19 +452,19 @@ class CjkBigramTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword ignore_scripts: The scripts to ignore.
- :paramtype ignore_scripts: list[str or
+ :vartype name: str
+ :ivar ignore_scripts: The scripts to ignore.
+ :vartype ignore_scripts: list[str or
~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts]
- :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if
+ :ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if
true), or just bigrams (if false). Default is false.
- :paramtype output_unigrams: bool
+ :vartype output_unigrams: bool
"""
_validation = {
@@ -401,6 +483,18 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword ignore_scripts: The scripts to ignore.
+ :paramtype ignore_scripts: list[str or
+ ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts]
+ :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if
+ true), or just bigrams (if false). Default is false.
+ :paramtype output_unigrams: bool
+ """
super(CjkBigramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' # type: str
self.ignore_scripts = kwargs.get('ignore_scripts', None)
@@ -412,8 +506,8 @@ class ClassicSimilarity(Similarity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Constant filled by server.
- :paramtype odata_type: str
+ :ivar odata_type: Required. Constant filled by server.
+ :vartype odata_type: str
"""
_validation = {
@@ -428,6 +522,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ClassicSimilarity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' # type: str
@@ -440,13 +536,13 @@ class LexicalTokenizer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
"""
_validation = {
@@ -467,6 +563,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(LexicalTokenizer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
@@ -477,16 +579,16 @@ class ClassicTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -505,6 +607,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(ClassicTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
@@ -518,11 +629,11 @@ class CognitiveServicesAccount(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource
+ :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
- :paramtype odata_type: str
- :keyword description: Description of the cognitive service resource attached to a skillset.
- :paramtype description: str
+ :vartype odata_type: str
+ :ivar description: Description of the cognitive service resource attached to a skillset.
+ :vartype description: str
"""
_validation = {
@@ -542,6 +653,10 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword description: Description of the cognitive service resource attached to a skillset.
+ :paramtype description: str
+ """
super(CognitiveServicesAccount, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.description = kwargs.get('description', None)
@@ -552,14 +667,14 @@ class CognitiveServicesAccountKey(CognitiveServicesAccount):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource
+ :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
- :paramtype odata_type: str
- :keyword description: Description of the cognitive service resource attached to a skillset.
- :paramtype description: str
- :keyword key: Required. The key used to provision the cognitive service resource attached to a
+ :vartype odata_type: str
+ :ivar description: Description of the cognitive service resource attached to a skillset.
+ :vartype description: str
+ :ivar key: Required. The key used to provision the cognitive service resource attached to a
skillset.
- :paramtype key: str
+ :vartype key: str
"""
_validation = {
@@ -577,6 +692,13 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword description: Description of the cognitive service resource attached to a skillset.
+ :paramtype description: str
+ :keyword key: Required. The key used to provision the cognitive service resource attached to a
+ skillset.
+ :paramtype key: str
+ """
super(CognitiveServicesAccountKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' # type: str
self.key = kwargs['key']
@@ -587,22 +709,22 @@ class CommonGramTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword common_words: Required. The set of common words.
- :paramtype common_words: list[str]
- :keyword ignore_case: A value indicating whether common words matching will be case
- insensitive. Default is false.
- :paramtype ignore_case: bool
- :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When
- in query mode, the token filter generates bigrams and then removes common words and single
- terms followed by a common word. Default is false.
- :paramtype use_query_mode: bool
+ :vartype name: str
+ :ivar common_words: Required. The set of common words.
+ :vartype common_words: list[str]
+ :ivar ignore_case: A value indicating whether common words matching will be case insensitive.
+ Default is false.
+ :vartype ignore_case: bool
+ :ivar use_query_mode: A value that indicates whether the token filter is in query mode. When in
+ query mode, the token filter generates bigrams and then removes common words and single terms
+ followed by a common word. Default is false.
+ :vartype use_query_mode: bool
"""
_validation = {
@@ -623,6 +745,21 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword common_words: Required. The set of common words.
+ :paramtype common_words: list[str]
+ :keyword ignore_case: A value indicating whether common words matching will be case
+ insensitive. Default is false.
+ :paramtype ignore_case: bool
+ :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When
+ in query mode, the token filter generates bigrams and then removes common words and single
+ terms followed by a common word. Default is false.
+ :paramtype use_query_mode: bool
+ """
super(CommonGramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' # type: str
self.common_words = kwargs['common_words']
@@ -638,26 +775,25 @@ class SearchIndexerSkill(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
@@ -683,6 +819,25 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ """
super(SearchIndexerSkill, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs.get('name', None)
@@ -697,26 +852,25 @@ class ConditionalSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
@@ -738,6 +892,25 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ """
super(ConditionalSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' # type: str
@@ -747,14 +920,14 @@ class CorsOptions(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword allowed_origins: Required. The list of origins from which JavaScript code will be
- granted access to your index. Can contain a list of hosts of the form
+ :ivar allowed_origins: Required. The list of origins from which JavaScript code will be granted
+ access to your index. Can contain a list of hosts of the form
{protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not
recommended).
- :paramtype allowed_origins: list[str]
- :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight
+ :vartype allowed_origins: list[str]
+ :ivar max_age_in_seconds: The duration for which browsers should cache CORS preflight
responses. Defaults to 5 minutes.
- :paramtype max_age_in_seconds: long
+ :vartype max_age_in_seconds: long
"""
_validation = {
@@ -770,6 +943,16 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword allowed_origins: Required. The list of origins from which JavaScript code will be
+ granted access to your index. Can contain a list of hosts of the form
+ {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not
+ recommended).
+ :paramtype allowed_origins: list[str]
+ :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight
+ responses. Defaults to 5 minutes.
+ :paramtype max_age_in_seconds: long
+ """
super(CorsOptions, self).__init__(**kwargs)
self.allowed_origins = kwargs['allowed_origins']
self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
@@ -783,13 +966,13 @@ class LexicalAnalyzer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
"""
_validation = {
@@ -810,6 +993,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(LexicalAnalyzer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = kwargs['name']
@@ -820,27 +1009,27 @@ class CustomAnalyzer(LexicalAnalyzer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
"edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
"microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
"standard_v2", "uax_url_email", "whitespace".
- :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
- :keyword token_filters: A list of token filters used to filter out or modify the tokens
- generated by a tokenizer. For example, you can specify a lowercase filter that converts all
- characters to lowercase. The filters are run in the order in which they are listed.
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
- :keyword char_filters: A list of character filters used to prepare input text before it is
+ :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :ivar token_filters: A list of token filters used to filter out or modify the tokens generated
+ by a tokenizer. For example, you can specify a lowercase filter that converts all characters to
+ lowercase. The filters are run in the order in which they are listed.
+ :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :ivar char_filters: A list of character filters used to prepare input text before it is
processed by the tokenizer. For instance, they can replace certain characters or symbols. The
filters are run in the order in which they are listed.
- :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
@@ -861,6 +1050,26 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
+ sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
+ "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
+ "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
+ "standard_v2", "uax_url_email", "whitespace".
+ :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :keyword token_filters: A list of token filters used to filter out or modify the tokens
+ generated by a tokenizer. For example, you can specify a lowercase filter that converts all
+ characters to lowercase. The filters are run in the order in which they are listed.
+ :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :keyword char_filters: A list of character filters used to prepare input text before it is
+ processed by the tokenizer. For instance, they can replace certain characters or symbols. The
+ filters are run in the order in which they are listed.
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ """
super(CustomAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' # type: str
self.tokenizer = kwargs['tokenizer']
@@ -873,51 +1082,51 @@ class CustomEntity(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be
+ :ivar name: Required. The top-level entity descriptor. Matches in the skill output will be
grouped by this name, and it should represent the "normalized" form of the text being found.
- :paramtype name: str
- :keyword description: This field can be used as a passthrough for custom metadata about the
+ :vartype name: str
+ :ivar description: This field can be used as a passthrough for custom metadata about the
matched text(s). The value of this field will appear with every match of its entity in the
skill output.
- :paramtype description: str
- :keyword type: This field can be used as a passthrough for custom metadata about the matched
+ :vartype description: str
+ :ivar type: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
- :paramtype type: str
- :keyword subtype: This field can be used as a passthrough for custom metadata about the matched
+ :vartype type: str
+ :ivar subtype: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
- :paramtype subtype: str
- :keyword id: This field can be used as a passthrough for custom metadata about the matched
+ :vartype subtype: str
+ :ivar id: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
- :paramtype id: str
- :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
+ :vartype id: str
+ :ivar case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
entity name should be sensitive to character casing. Sample case insensitive matches of
"Microsoft" could be: microsoft, microSoft, MICROSOFT.
- :paramtype case_sensitive: bool
- :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with
- the entity name should be sensitive to accent.
- :paramtype accent_sensitive: bool
- :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number
- of divergent characters that would still constitute a match with the entity name. The smallest
+ :vartype case_sensitive: bool
+ :ivar accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
+ entity name should be sensitive to accent.
+ :vartype accent_sensitive: bool
+ :ivar fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number of
+ divergent characters that would still constitute a match with the entity name. The smallest
possible fuzziness for any given match is returned. For instance, if the edit distance is set
to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case
sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but
otherwise do.
- :paramtype fuzzy_edit_distance: int
- :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It
- be used to change the default value of all aliases caseSensitive values.
- :paramtype default_case_sensitive: bool
- :keyword default_accent_sensitive: Changes the default accent sensitivity value for this
- entity. It be used to change the default value of all aliases accentSensitive values.
- :paramtype default_accent_sensitive: bool
- :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
+ :vartype fuzzy_edit_distance: int
+ :ivar default_case_sensitive: Changes the default case sensitivity value for this entity. It be
+ used to change the default value of all aliases caseSensitive values.
+ :vartype default_case_sensitive: bool
+ :ivar default_accent_sensitive: Changes the default accent sensitivity value for this entity.
+ It be used to change the default value of all aliases accentSensitive values.
+ :vartype default_accent_sensitive: bool
+ :ivar default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
entity. It can be used to change the default value of all aliases fuzzyEditDistance values.
- :paramtype default_fuzzy_edit_distance: int
- :keyword aliases: An array of complex objects that can be used to specify alternative spellings
- or synonyms to the root entity name.
- :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
+ :vartype default_fuzzy_edit_distance: int
+ :ivar aliases: An array of complex objects that can be used to specify alternative spellings or
+ synonyms to the root entity name.
+ :vartype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
"""
_validation = {
@@ -943,6 +1152,53 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be
+ grouped by this name, and it should represent the "normalized" form of the text being found.
+ :paramtype name: str
+ :keyword description: This field can be used as a passthrough for custom metadata about the
+ matched text(s). The value of this field will appear with every match of its entity in the
+ skill output.
+ :paramtype description: str
+ :keyword type: This field can be used as a passthrough for custom metadata about the matched
+ text(s). The value of this field will appear with every match of its entity in the skill
+ output.
+ :paramtype type: str
+ :keyword subtype: This field can be used as a passthrough for custom metadata about the matched
+ text(s). The value of this field will appear with every match of its entity in the skill
+ output.
+ :paramtype subtype: str
+ :keyword id: This field can be used as a passthrough for custom metadata about the matched
+ text(s). The value of this field will appear with every match of its entity in the skill
+ output.
+ :paramtype id: str
+ :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
+ entity name should be sensitive to character casing. Sample case insensitive matches of
+ "Microsoft" could be: microsoft, microSoft, MICROSOFT.
+ :paramtype case_sensitive: bool
+ :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with
+ the entity name should be sensitive to accent.
+ :paramtype accent_sensitive: bool
+ :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number
+ of divergent characters that would still constitute a match with the entity name. The smallest
+ possible fuzziness for any given match is returned. For instance, if the edit distance is set
+ to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case
+ sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but
+ otherwise do.
+ :paramtype fuzzy_edit_distance: int
+ :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It
+ be used to change the default value of all aliases caseSensitive values.
+ :paramtype default_case_sensitive: bool
+ :keyword default_accent_sensitive: Changes the default accent sensitivity value for this
+ entity. It be used to change the default value of all aliases accentSensitive values.
+ :paramtype default_accent_sensitive: bool
+ :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
+ entity. It can be used to change the default value of all aliases fuzzyEditDistance values.
+ :paramtype default_fuzzy_edit_distance: int
+ :keyword aliases: An array of complex objects that can be used to specify alternative spellings
+ or synonyms to the root entity name.
+ :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
+ """
super(CustomEntity, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
@@ -963,14 +1219,14 @@ class CustomEntityAlias(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword text: Required. The text of the alias.
- :paramtype text: str
- :keyword case_sensitive: Determine if the alias is case sensitive.
- :paramtype case_sensitive: bool
- :keyword accent_sensitive: Determine if the alias is accent sensitive.
- :paramtype accent_sensitive: bool
- :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
- :paramtype fuzzy_edit_distance: int
+ :ivar text: Required. The text of the alias.
+ :vartype text: str
+ :ivar case_sensitive: Determine if the alias is case sensitive.
+ :vartype case_sensitive: bool
+ :ivar accent_sensitive: Determine if the alias is accent sensitive.
+ :vartype accent_sensitive: bool
+ :ivar fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
+ :vartype fuzzy_edit_distance: int
"""
_validation = {
@@ -988,6 +1244,16 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword text: Required. The text of the alias.
+ :paramtype text: str
+ :keyword case_sensitive: Determine if the alias is case sensitive.
+ :paramtype case_sensitive: bool
+ :keyword accent_sensitive: Determine if the alias is accent sensitive.
+ :paramtype accent_sensitive: bool
+ :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
+ :paramtype fuzzy_edit_distance: int
+ """
super(CustomEntityAlias, self).__init__(**kwargs)
self.text = kwargs['text']
self.case_sensitive = kwargs.get('case_sensitive', None)
@@ -1000,47 +1266,45 @@ class CustomEntityLookupSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage
- :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to
+ :ivar entities_definition_uri: Path to a JSON or CSV file containing all the target text to
match against. This entity definition is read at the beginning of an indexer run. Any updates
to this file during an indexer run will not take effect until subsequent runs. This config must
be accessible over HTTPS.
- :paramtype entities_definition_uri: str
- :keyword inline_entities_definition: The inline CustomEntity definition.
- :paramtype inline_entities_definition:
- list[~azure.search.documents.indexes.models.CustomEntity]
- :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is
+ :vartype entities_definition_uri: str
+ :ivar inline_entities_definition: The inline CustomEntity definition.
+ :vartype inline_entities_definition: list[~azure.search.documents.indexes.models.CustomEntity]
+ :ivar global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is not
+ set in CustomEntity, this value will be the default value.
+ :vartype global_default_case_sensitive: bool
+ :ivar global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive is
not set in CustomEntity, this value will be the default value.
- :paramtype global_default_case_sensitive: bool
- :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive
- is not set in CustomEntity, this value will be the default value.
- :paramtype global_default_accent_sensitive: bool
- :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
+ :vartype global_default_accent_sensitive: bool
+ :ivar global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
FuzzyEditDistance is not set in CustomEntity, this value will be the default value.
- :paramtype global_default_fuzzy_edit_distance: int
+ :vartype global_default_fuzzy_edit_distance: int
"""
_validation = {
@@ -1068,6 +1332,46 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage
+ :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to
+ match against. This entity definition is read at the beginning of an indexer run. Any updates
+ to this file during an indexer run will not take effect until subsequent runs. This config must
+ be accessible over HTTPS.
+ :paramtype entities_definition_uri: str
+ :keyword inline_entities_definition: The inline CustomEntity definition.
+ :paramtype inline_entities_definition:
+ list[~azure.search.documents.indexes.models.CustomEntity]
+ :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is
+ not set in CustomEntity, this value will be the default value.
+ :paramtype global_default_case_sensitive: bool
+ :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive
+ is not set in CustomEntity, this value will be the default value.
+ :paramtype global_default_accent_sensitive: bool
+ :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
+ FuzzyEditDistance is not set in CustomEntity, this value will be the default value.
+ :paramtype global_default_fuzzy_edit_distance: int
+ """
super(CustomEntityLookupSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.CustomEntityLookupSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -1083,13 +1387,13 @@ class LexicalNormalizer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the normalizer.
- :paramtype odata_type: str
- :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
- 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
- :paramtype name: str
+ :ivar odata_type: Required. Identifies the concrete type of the normalizer.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding',
+ 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :vartype name: str
"""
_validation = {
@@ -1106,6 +1410,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword odata_type: Required. Identifies the concrete type of the normalizer.
+ :paramtype odata_type: str
+ :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
+ 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :paramtype name: str
+ """
super(LexicalNormalizer, self).__init__(**kwargs)
self.odata_type = kwargs['odata_type']
self.name = kwargs['name']
@@ -1116,21 +1429,21 @@ class CustomNormalizer(LexicalNormalizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the normalizer.
- :paramtype odata_type: str
- :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
- 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
- :paramtype name: str
- :keyword token_filters: A list of token filters used to filter out or modify the input token.
- For example, you can specify a lowercase filter that converts all characters to lowercase. The
+ :ivar odata_type: Required. Identifies the concrete type of the normalizer.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding',
+ 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :vartype name: str
+ :ivar token_filters: A list of token filters used to filter out or modify the input token. For
+ example, you can specify a lowercase filter that converts all characters to lowercase. The
filters are run in the order in which they are listed.
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
- :keyword char_filters: A list of character filters used to prepare input text before it is
+ :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :ivar char_filters: A list of character filters used to prepare input text before it is
processed. For instance, they can replace certain characters or symbols. The filters are run in
the order in which they are listed.
- :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
@@ -1149,6 +1462,23 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword odata_type: Required. Identifies the concrete type of the normalizer.
+ :paramtype odata_type: str
+ :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
+ 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :paramtype name: str
+ :keyword token_filters: A list of token filters used to filter out or modify the input token.
+ For example, you can specify a lowercase filter that converts all characters to lowercase. The
+ filters are run in the order in which they are listed.
+ :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :keyword char_filters: A list of character filters used to prepare input text before it is
+ processed. For instance, they can replace certain characters or symbols. The filters are run in
+ the order in which they are listed.
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ """
super(CustomNormalizer, self).__init__(**kwargs)
self.token_filters = kwargs.get('token_filters', None)
self.char_filters = kwargs.get('char_filters', None)
@@ -1162,9 +1492,9 @@ class DataChangeDetectionPolicy(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data change detection
+ :ivar odata_type: Required. Identifies the concrete type of the data change detection
policy.Constant filled by server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -1183,6 +1513,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(DataChangeDetectionPolicy, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -1195,9 +1527,9 @@ class DataDeletionDetectionPolicy(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data deletion detection
+ :ivar odata_type: Required. Identifies the concrete type of the data deletion detection
policy.Constant filled by server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -1216,6 +1548,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(DataDeletionDetectionPolicy, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -1223,9 +1557,9 @@ def __init__(
class DataSourceCredentials(msrest.serialization.Model):
"""Represents credentials that can be used to connect to a datasource.
- :keyword connection_string: The connection string for the datasource. Set to
- ':code:``' if you do not want the connection string updated.
- :paramtype connection_string: str
+ :ivar connection_string: The connection string for the datasource. Set to ':code:``'
+ if you do not want the connection string updated.
+ :vartype connection_string: str
"""
_attribute_map = {
@@ -1236,6 +1570,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword connection_string: The connection string for the datasource. Set to
+ ':code:``' if you do not want the connection string updated.
+ :paramtype connection_string: str
+ """
super(DataSourceCredentials, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
@@ -1245,11 +1584,11 @@ class DefaultCognitiveServicesAccount(CognitiveServicesAccount):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource
+ :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
- :paramtype odata_type: str
- :keyword description: Description of the cognitive service resource attached to a skillset.
- :paramtype description: str
+ :vartype odata_type: str
+ :ivar description: Description of the cognitive service resource attached to a skillset.
+ :vartype description: str
"""
_validation = {
@@ -1265,6 +1604,10 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword description: Description of the cognitive service resource attached to a skillset.
+ :paramtype description: str
+ """
super(DefaultCognitiveServicesAccount, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' # type: str
@@ -1274,27 +1617,27 @@ class DictionaryDecompounderTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword word_list: Required. The list of words to match against.
- :paramtype word_list: list[str]
- :keyword min_word_size: The minimum word size. Only words longer than this get processed.
- Default is 5. Maximum is 300.
- :paramtype min_word_size: int
- :keyword min_subword_size: The minimum subword size. Only subwords longer than this are
- outputted. Default is 2. Maximum is 300.
- :paramtype min_subword_size: int
- :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are
+ :vartype name: str
+ :ivar word_list: Required. The list of words to match against.
+ :vartype word_list: list[str]
+ :ivar min_word_size: The minimum word size. Only words longer than this get processed. Default
+ is 5. Maximum is 300.
+ :vartype min_word_size: int
+ :ivar min_subword_size: The minimum subword size. Only subwords longer than this are outputted.
+ Default is 2. Maximum is 300.
+ :vartype min_subword_size: int
+ :ivar max_subword_size: The maximum subword size. Only subwords shorter than this are
outputted. Default is 15. Maximum is 300.
- :paramtype max_subword_size: int
- :keyword only_longest_match: A value indicating whether to add only the longest matching
- subword to the output. Default is false.
- :paramtype only_longest_match: bool
+ :vartype max_subword_size: int
+ :ivar only_longest_match: A value indicating whether to add only the longest matching subword
+ to the output. Default is false.
+ :vartype only_longest_match: bool
"""
_validation = {
@@ -1320,6 +1663,26 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword word_list: Required. The list of words to match against.
+ :paramtype word_list: list[str]
+ :keyword min_word_size: The minimum word size. Only words longer than this get processed.
+ Default is 5. Maximum is 300.
+ :paramtype min_word_size: int
+ :keyword min_subword_size: The minimum subword size. Only subwords longer than this are
+ outputted. Default is 2. Maximum is 300.
+ :paramtype min_subword_size: int
+ :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are
+ outputted. Default is 15. Maximum is 300.
+ :paramtype max_subword_size: int
+ :keyword only_longest_match: A value indicating whether to add only the longest matching
+ subword to the output. Default is false.
+ :paramtype only_longest_match: bool
+ """
super(DictionaryDecompounderTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' # type: str
self.word_list = kwargs['word_list']
@@ -1337,18 +1700,18 @@ class ScoringFunction(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
"""
@@ -1373,6 +1736,18 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ """
super(ScoringFunction, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.field_name = kwargs['field_name']
@@ -1385,21 +1760,21 @@ class DistanceScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the distance scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
+ :ivar parameters: Required. Parameter values for the distance scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
"""
_validation = {
@@ -1421,6 +1796,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the distance scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
+ """
super(DistanceScoringFunction, self).__init__(**kwargs)
self.type = 'distance' # type: str
self.parameters = kwargs['parameters']
@@ -1431,12 +1820,12 @@ class DistanceScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword reference_point_parameter: Required. The name of the parameter passed in search
- queries to specify the reference location.
- :paramtype reference_point_parameter: str
- :keyword boosting_distance: Required. The distance in kilometers from the reference location
- where the boosting range ends.
- :paramtype boosting_distance: float
+ :ivar reference_point_parameter: Required. The name of the parameter passed in search queries
+ to specify the reference location.
+ :vartype reference_point_parameter: str
+ :ivar boosting_distance: Required. The distance in kilometers from the reference location where
+ the boosting range ends.
+ :vartype boosting_distance: float
"""
_validation = {
@@ -1453,6 +1842,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword reference_point_parameter: Required. The name of the parameter passed in search
+ queries to specify the reference location.
+ :paramtype reference_point_parameter: str
+ :keyword boosting_distance: Required. The distance in kilometers from the reference location
+ where the boosting range ends.
+ :paramtype boosting_distance: float
+ """
super(DistanceScoringParameters, self).__init__(**kwargs)
self.reference_point_parameter = kwargs['reference_point_parameter']
self.boosting_distance = kwargs['boosting_distance']
@@ -1463,33 +1860,32 @@ class DocumentExtractionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
- :paramtype parsing_mode: str
- :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
+ :vartype parsing_mode: str
+ :ivar data_to_extract: The type of data to be extracted for the skill. Will be set to
'contentAndMetadata' if not defined.
- :paramtype data_to_extract: str
- :keyword configuration: A dictionary of configurations for the skill.
- :paramtype configuration: dict[str, any]
+ :vartype data_to_extract: str
+ :ivar configuration: A dictionary of configurations for the skill.
+ :vartype configuration: dict[str, any]
"""
_validation = {
@@ -1514,6 +1910,32 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
+ :paramtype parsing_mode: str
+ :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to
+ 'contentAndMetadata' if not defined.
+ :paramtype data_to_extract: str
+ :keyword configuration: A dictionary of configurations for the skill.
+ :paramtype configuration: dict[str, any]
+ """
super(DocumentExtractionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Util.DocumentExtractionSkill' # type: str
self.parsing_mode = kwargs.get('parsing_mode', None)
@@ -1521,26 +1943,55 @@ def __init__(
self.configuration = kwargs.get('configuration', None)
+class DocumentKeysOrIds(msrest.serialization.Model):
+ """DocumentKeysOrIds.
+
+ :ivar document_keys: document keys to be reset.
+ :vartype document_keys: list[str]
+ :ivar datasource_document_ids: datasource document identifiers to be reset.
+ :vartype datasource_document_ids: list[str]
+ """
+
+ _attribute_map = {
+ 'document_keys': {'key': 'documentKeys', 'type': '[str]'},
+ 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ """
+ :keyword document_keys: document keys to be reset.
+ :paramtype document_keys: list[str]
+ :keyword datasource_document_ids: datasource document identifiers to be reset.
+ :paramtype datasource_document_ids: list[str]
+ """
+ super(DocumentKeysOrIds, self).__init__(**kwargs)
+ self.document_keys = kwargs.get('document_keys', None)
+ self.datasource_document_ids = kwargs.get('datasource_document_ids', None)
+
+
class EdgeNGramTokenFilter(TokenFilter):
"""Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2.
- :paramtype max_gram: int
- :keyword side: Specifies which side of the input the n-gram should be generated from. Default
- is "front". Possible values include: "front", "back".
- :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2.
+ :vartype max_gram: int
+ :ivar side: Specifies which side of the input the n-gram should be generated from. Default is
+ "front". Possible values include: "front", "back".
+ :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
_validation = {
@@ -1560,6 +2011,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2.
+ :paramtype max_gram: int
+ :keyword side: Specifies which side of the input the n-gram should be generated from. Default
+ is "front". Possible values include: "front", "back".
+ :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ """
super(EdgeNGramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' # type: str
self.min_gram = kwargs.get('min_gram', 1)
@@ -1572,21 +2037,21 @@ class EdgeNGramTokenFilterV2(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
- :keyword side: Specifies which side of the input the n-gram should be generated from. Default
- is "front". Possible values include: "front", "back".
- :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
+ :ivar side: Specifies which side of the input the n-gram should be generated from. Default is
+ "front". Possible values include: "front", "back".
+ :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
_validation = {
@@ -1608,6 +2073,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ :keyword side: Specifies which side of the input the n-gram should be generated from. Default
+ is "front". Possible values include: "front", "back".
+ :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ """
super(EdgeNGramTokenFilterV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' # type: str
self.min_gram = kwargs.get('min_gram', 1)
@@ -1620,20 +2099,20 @@ class EdgeNGramTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
- :keyword token_chars: Character classes to keep in the tokens.
- :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
+ :ivar token_chars: Character classes to keep in the tokens.
+ :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
_validation = {
@@ -1655,6 +2134,19 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ :keyword token_chars: Character classes to keep in the tokens.
+ :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ """
super(EdgeNGramTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' # type: str
self.min_gram = kwargs.get('min_gram', 1)
@@ -1667,15 +2159,15 @@ class ElisionTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword articles: The set of articles to remove.
- :paramtype articles: list[str]
+ :vartype name: str
+ :ivar articles: The set of articles to remove.
+ :vartype articles: list[str]
"""
_validation = {
@@ -1693,6 +2185,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword articles: The set of articles to remove.
+ :paramtype articles: list[str]
+ """
super(ElisionTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' # type: str
self.articles = kwargs.get('articles', None)
@@ -1703,36 +2203,35 @@ class EntityLinkingSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype minimum_precision: float
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -1758,6 +2257,35 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+ :keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(EntityLinkingSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.EntityLinkingSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -1770,42 +2298,41 @@ class EntityRecognitionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword categories: A list of entity categories that should be extracted.
- :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar categories: A list of entity categories that should be extracted.
+ :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de",
"el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage
- :keyword include_typeless_entities: Determines whether or not to include entities which are
- well known but don't conform to a pre-defined type. If this configuration is not set (default),
- set to null or set to false, entities which don't conform to one of the pre-defined types will
- not be surfaced.
- :paramtype include_typeless_entities: bool
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :ivar include_typeless_entities: Determines whether or not to include entities which are well
+ known but don't conform to a pre-defined type. If this configuration is not set (default), set
+ to null or set to false, entities which don't conform to one of the pre-defined types will not
+ be surfaced.
+ :vartype include_typeless_entities: bool
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
+ :vartype minimum_precision: float
"""
_validation = {
@@ -1831,6 +2358,41 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword categories: A list of entity categories that should be extracted.
+ :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de",
+ "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage
+ :keyword include_typeless_entities: Determines whether or not to include entities which are
+ well known but don't conform to a pre-defined type. If this configuration is not set (default),
+ set to null or set to false, entities which don't conform to one of the pre-defined types will
+ not be surfaced.
+ :paramtype include_typeless_entities: bool
+ :keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ """
super(EntityRecognitionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' # type: str
self.categories = kwargs.get('categories', None)
@@ -1844,38 +2406,37 @@ class EntityRecognitionSkillV3(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword categories: A list of entity categories that should be extracted.
- :paramtype categories: list[str]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar categories: A list of entity categories that should be extracted.
+ :vartype categories: list[str]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype minimum_precision: float
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -1902,6 +2463,37 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword categories: A list of entity categories that should be extracted.
+ :paramtype categories: list[str]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+ :keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(EntityRecognitionSkillV3, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.EntityRecognitionSkill' # type: str
self.categories = kwargs.get('categories', None)
@@ -1915,13 +2507,13 @@ class FieldMapping(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword source_field_name: Required. The name of the field in the data source.
- :paramtype source_field_name: str
- :keyword target_field_name: The name of the target field in the index. Same as the source field
+ :ivar source_field_name: Required. The name of the field in the data source.
+ :vartype source_field_name: str
+ :ivar target_field_name: The name of the target field in the index. Same as the source field
name by default.
- :paramtype target_field_name: str
- :keyword mapping_function: A function to apply to each source field value before indexing.
- :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
+ :vartype target_field_name: str
+ :ivar mapping_function: A function to apply to each source field value before indexing.
+ :vartype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
"""
_validation = {
@@ -1938,6 +2530,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword source_field_name: Required. The name of the field in the data source.
+ :paramtype source_field_name: str
+ :keyword target_field_name: The name of the target field in the index. Same as the source field
+ name by default.
+ :paramtype target_field_name: str
+ :keyword mapping_function: A function to apply to each source field value before indexing.
+ :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
+ """
super(FieldMapping, self).__init__(**kwargs)
self.source_field_name = kwargs['source_field_name']
self.target_field_name = kwargs.get('target_field_name', None)
@@ -1949,11 +2550,11 @@ class FieldMappingFunction(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the field mapping function.
- :paramtype name: str
- :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each
+ :ivar name: Required. The name of the field mapping function.
+ :vartype name: str
+ :ivar parameters: A dictionary of parameter name/value pairs to pass to the function. Each
value must be of a primitive type.
- :paramtype parameters: dict[str, any]
+ :vartype parameters: dict[str, any]
"""
_validation = {
@@ -1969,6 +2570,13 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the field mapping function.
+ :paramtype name: str
+ :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each
+ value must be of a primitive type.
+ :paramtype parameters: dict[str, any]
+ """
super(FieldMappingFunction, self).__init__(**kwargs)
self.name = kwargs['name']
self.parameters = kwargs.get('parameters', None)
@@ -1979,21 +2587,21 @@ class FreshnessScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the freshness scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
+ :ivar parameters: Required. Parameter values for the freshness scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
"""
_validation = {
@@ -2015,6 +2623,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the freshness scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
+ """
super(FreshnessScoringFunction, self).__init__(**kwargs)
self.type = 'freshness' # type: str
self.parameters = kwargs['parameters']
@@ -2025,9 +2647,9 @@ class FreshnessScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword boosting_duration: Required. The expiration period after which boosting will stop for
- a particular document.
- :paramtype boosting_duration: ~datetime.timedelta
+ :ivar boosting_duration: Required. The expiration period after which boosting will stop for a
+ particular document.
+ :vartype boosting_duration: ~datetime.timedelta
"""
_validation = {
@@ -2042,6 +2664,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword boosting_duration: Required. The expiration period after which boosting will stop for
+ a particular document.
+ :paramtype boosting_duration: ~datetime.timedelta
+ """
super(FreshnessScoringParameters, self).__init__(**kwargs)
self.boosting_duration = kwargs['boosting_duration']
@@ -2073,6 +2700,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(GetIndexStatisticsResult, self).__init__(**kwargs)
self.document_count = None
self.storage_size = None
@@ -2083,11 +2712,11 @@ class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data change detection
+ :ivar odata_type: Required. Identifies the concrete type of the data change detection
policy.Constant filled by server.
- :paramtype odata_type: str
- :keyword high_water_mark_column_name: Required. The name of the high water mark column.
- :paramtype high_water_mark_column_name: str
+ :vartype odata_type: str
+ :ivar high_water_mark_column_name: Required. The name of the high water mark column.
+ :vartype high_water_mark_column_name: str
"""
_validation = {
@@ -2104,6 +2733,10 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword high_water_mark_column_name: Required. The name of the high water mark column.
+ :paramtype high_water_mark_column_name: str
+ """
super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' # type: str
self.high_water_mark_column_name = kwargs['high_water_mark_column_name']
@@ -2114,34 +2747,33 @@ class ImageAnalysisSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "en", "es", "ja", "pt", "zh".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage
- :keyword visual_features: A list of visual features.
- :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
- :keyword details: A string indicating which domain-specific details to return.
- :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
+ :ivar visual_features: A list of visual features.
+ :vartype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
+ :ivar details: A string indicating which domain-specific details to return.
+ :vartype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
"""
_validation = {
@@ -2166,6 +2798,33 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "en", "es", "ja", "pt", "zh".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage
+ :keyword visual_features: A list of visual features.
+ :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
+ :keyword details: A string indicating which domain-specific details to return.
+ :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
+ """
super(ImageAnalysisSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -2227,6 +2886,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexerCurrentState, self).__init__(**kwargs)
self.mode = None
self.all_docs_initial_change_tracking_state = None
@@ -2311,6 +2972,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexerExecutionResult, self).__init__(**kwargs)
self.status = None
self.status_detail = None
@@ -2329,19 +2992,18 @@ def __init__(
class IndexingParameters(msrest.serialization.Model):
"""Represents parameters for indexer execution.
- :keyword batch_size: The number of items that are read from the data source and indexed as a
+ :ivar batch_size: The number of items that are read from the data source and indexed as a
single batch in order to improve performance. The default depends on the data source type.
- :paramtype batch_size: int
- :keyword max_failed_items: The maximum number of items that can fail indexing for indexer
+ :vartype batch_size: int
+ :ivar max_failed_items: The maximum number of items that can fail indexing for indexer
execution to still be considered successful. -1 means no limit. Default is 0.
- :paramtype max_failed_items: int
- :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can
- fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
- :paramtype max_failed_items_per_batch: int
- :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is
+ :vartype max_failed_items: int
+ :ivar max_failed_items_per_batch: The maximum number of items in a single batch that can fail
+ indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
+ :vartype max_failed_items_per_batch: int
+ :ivar configuration: A dictionary of indexer-specific configuration properties. Each name is
the name of a specific property. Each value must be of a primitive type.
- :paramtype configuration:
- ~azure.search.documents.indexes.models.IndexingParametersConfiguration
+ :vartype configuration: ~azure.search.documents.indexes.models.IndexingParametersConfiguration
"""
_attribute_map = {
@@ -2355,6 +3017,21 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword batch_size: The number of items that are read from the data source and indexed as a
+ single batch in order to improve performance. The default depends on the data source type.
+ :paramtype batch_size: int
+ :keyword max_failed_items: The maximum number of items that can fail indexing for indexer
+ execution to still be considered successful. -1 means no limit. Default is 0.
+ :paramtype max_failed_items: int
+ :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can
+ fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
+ :paramtype max_failed_items_per_batch: int
+ :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is
+ the name of a specific property. Each value must be of a primitive type.
+ :paramtype configuration:
+ ~azure.search.documents.indexes.models.IndexingParametersConfiguration
+ """
super(IndexingParameters, self).__init__(**kwargs)
self.batch_size = kwargs.get('batch_size', None)
self.max_failed_items = kwargs.get('max_failed_items', 0)
@@ -2365,73 +3042,73 @@ def __init__(
class IndexingParametersConfiguration(msrest.serialization.Model):
"""A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
- :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
+ :vartype additional_properties: dict[str, any]
+ :ivar parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines".
Default value: "default".
- :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
- :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore
- when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip
- over those files during indexing.
- :paramtype excluded_file_name_extensions: str
- :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select
- when processing from Azure blob storage. For example, you could focus indexing on specific
+ :vartype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
+ :ivar excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore when
+ processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over
+ those files during indexing.
+ :vartype excluded_file_name_extensions: str
+ :ivar indexed_file_name_extensions: Comma-delimited list of filename extensions to select when
+ processing from Azure blob storage. For example, you could focus indexing on specific
application files ".docx, .pptx, .msg" to specifically include those file types.
- :paramtype indexed_file_name_extensions: str
- :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to
- continue indexing when an unsupported content type is encountered, and you don't know all the
- content types (file extensions) in advance.
- :paramtype fail_on_unsupported_content_type: bool
- :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
+ :vartype indexed_file_name_extensions: str
+ :ivar fail_on_unsupported_content_type: For Azure blobs, set to false if you want to continue
+ indexing when an unsupported content type is encountered, and you don't know all the content
+ types (file extensions) in advance.
+ :vartype fail_on_unsupported_content_type: bool
+ :ivar fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
indexing if a document fails indexing.
- :paramtype fail_on_unprocessable_document: bool
- :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this
- property to true to still index storage metadata for blob content that is too large to process.
+ :vartype fail_on_unprocessable_document: bool
+ :ivar index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this property
+ to true to still index storage metadata for blob content that is too large to process.
Oversized blobs are treated as errors by default. For limits on blob size, see
https://docs.microsoft.com/azure/search/search-limits-quotas-capacity.
- :paramtype index_storage_metadata_only_for_oversized_documents: bool
- :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
+ :vartype index_storage_metadata_only_for_oversized_documents: bool
+ :ivar delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
headers, useful for mapping source fields to destination fields in an index.
- :paramtype delimited_text_headers: str
- :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
+ :vartype delimited_text_headers: str
+ :ivar delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
delimiter for CSV files where each line starts a new document (for example, "|").
- :paramtype delimited_text_delimiter: str
- :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line
- of each blob contains headers.
- :paramtype first_line_contains_headers: bool
- :keyword document_root: For JSON arrays, given a structured or semi-structured document, you
- can specify a path to the array using this property.
- :paramtype document_root: str
- :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the
+ :vartype delimited_text_delimiter: str
+ :ivar first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line of
+ each blob contains headers.
+ :vartype first_line_contains_headers: bool
+ :ivar document_root: For JSON arrays, given a structured or semi-structured document, you can
+ specify a path to the array using this property.
+ :vartype document_root: str
+ :ivar data_to_extract: Specifies the data to extract from Azure blob storage and tells the
indexer which data to extract from image content when "imageAction" is set to a value other
than "none". This applies to embedded image content in a .PDF or other application, or image
files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata",
"allMetadata", "contentAndMetadata". Default value: "contentAndMetadata".
- :paramtype data_to_extract: str or
+ :vartype data_to_extract: str or
~azure.search.documents.indexes.models.BlobIndexerDataToExtract
- :keyword image_action: Determines how to process embedded images and image files in Azure blob
+ :ivar image_action: Determines how to process embedded images and image files in Azure blob
storage. Setting the "imageAction" configuration to any value other than "none" requires that
a skillset also be attached to that indexer. Possible values include: "none",
"generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none".
- :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
- :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data
- that is an object representing the original file data downloaded from your blob data source.
- This allows you to pass the original file data to a custom skill for processing within the
+ :vartype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
+ :ivar allow_skillset_to_read_file_data: If true, will create a path //document//file_data that
+ is an object representing the original file data downloaded from your blob data source. This
+ allows you to pass the original file data to a custom skill for processing within the
enrichment pipeline, or to the Document Extraction skill.
- :paramtype allow_skillset_to_read_file_data: bool
- :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files
- in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
- :paramtype pdf_text_rotation_algorithm: str or
+ :vartype allow_skillset_to_read_file_data: bool
+ :ivar pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files in
+ Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
+ :vartype pdf_text_rotation_algorithm: str or
~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm
- :keyword execution_environment: Specifies the environment in which the indexer should execute.
+ :ivar execution_environment: Specifies the environment in which the indexer should execute.
Possible values include: "standard", "private". Default value: "standard".
- :paramtype execution_environment: str or
+ :vartype execution_environment: str or
~azure.search.documents.indexes.models.IndexerExecutionEnvironment
- :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL
- database data sources, specified in the format "hh:mm:ss".
- :paramtype query_timeout: str
+ :ivar query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL database
+ data sources, specified in the format "hh:mm:ss".
+ :vartype query_timeout: str
"""
_attribute_map = {
@@ -2458,6 +3135,75 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
+ Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines".
+ Default value: "default".
+ :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
+ :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore
+ when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip
+ over those files during indexing.
+ :paramtype excluded_file_name_extensions: str
+ :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select
+ when processing from Azure blob storage. For example, you could focus indexing on specific
+ application files ".docx, .pptx, .msg" to specifically include those file types.
+ :paramtype indexed_file_name_extensions: str
+ :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to
+ continue indexing when an unsupported content type is encountered, and you don't know all the
+ content types (file extensions) in advance.
+ :paramtype fail_on_unsupported_content_type: bool
+ :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
+ indexing if a document fails indexing.
+ :paramtype fail_on_unprocessable_document: bool
+ :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this
+ property to true to still index storage metadata for blob content that is too large to process.
+ Oversized blobs are treated as errors by default. For limits on blob size, see
+ https://docs.microsoft.com/azure/search/search-limits-quotas-capacity.
+ :paramtype index_storage_metadata_only_for_oversized_documents: bool
+ :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
+ headers, useful for mapping source fields to destination fields in an index.
+ :paramtype delimited_text_headers: str
+ :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
+ delimiter for CSV files where each line starts a new document (for example, "|").
+ :paramtype delimited_text_delimiter: str
+ :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line
+ of each blob contains headers.
+ :paramtype first_line_contains_headers: bool
+ :keyword document_root: For JSON arrays, given a structured or semi-structured document, you
+ can specify a path to the array using this property.
+ :paramtype document_root: str
+ :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the
+ indexer which data to extract from image content when "imageAction" is set to a value other
+ than "none". This applies to embedded image content in a .PDF or other application, or image
+ files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata",
+ "allMetadata", "contentAndMetadata". Default value: "contentAndMetadata".
+ :paramtype data_to_extract: str or
+ ~azure.search.documents.indexes.models.BlobIndexerDataToExtract
+ :keyword image_action: Determines how to process embedded images and image files in Azure blob
+ storage. Setting the "imageAction" configuration to any value other than "none" requires that
+ a skillset also be attached to that indexer. Possible values include: "none",
+ "generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none".
+ :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
+ :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data
+ that is an object representing the original file data downloaded from your blob data source.
+ This allows you to pass the original file data to a custom skill for processing within the
+ enrichment pipeline, or to the Document Extraction skill.
+ :paramtype allow_skillset_to_read_file_data: bool
+ :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files
+ in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
+ :paramtype pdf_text_rotation_algorithm: str or
+ ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm
+ :keyword execution_environment: Specifies the environment in which the indexer should execute.
+ Possible values include: "standard", "private". Default value: "standard".
+ :paramtype execution_environment: str or
+ ~azure.search.documents.indexes.models.IndexerExecutionEnvironment
+ :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL
+ database data sources, specified in the format "hh:mm:ss".
+ :paramtype query_timeout: str
+ """
super(IndexingParametersConfiguration, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.parsing_mode = kwargs.get('parsing_mode', "default")
@@ -2483,10 +3229,10 @@ class IndexingSchedule(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword interval: Required. The interval of time between indexer executions.
- :paramtype interval: ~datetime.timedelta
- :keyword start_time: The time when an indexer should start running.
- :paramtype start_time: ~datetime.datetime
+ :ivar interval: Required. The interval of time between indexer executions.
+ :vartype interval: ~datetime.timedelta
+ :ivar start_time: The time when an indexer should start running.
+ :vartype start_time: ~datetime.datetime
"""
_validation = {
@@ -2502,6 +3248,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword interval: Required. The interval of time between indexer executions.
+ :paramtype interval: ~datetime.timedelta
+ :keyword start_time: The time when an indexer should start running.
+ :paramtype start_time: ~datetime.datetime
+ """
super(IndexingSchedule, self).__init__(**kwargs)
self.interval = kwargs['interval']
self.start_time = kwargs.get('start_time', None)
@@ -2512,14 +3264,14 @@ class InputFieldMappingEntry(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the input.
- :paramtype name: str
- :keyword source: The source of the input.
- :paramtype source: str
- :keyword source_context: The source context used for selecting recursive inputs.
- :paramtype source_context: str
- :keyword inputs: The recursive inputs used when creating a complex type.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar name: Required. The name of the input.
+ :vartype name: str
+ :ivar source: The source of the input.
+ :vartype source: str
+ :ivar source_context: The source context used for selecting recursive inputs.
+ :vartype source_context: str
+ :ivar inputs: The recursive inputs used when creating a complex type.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
_validation = {
@@ -2537,6 +3289,16 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the input.
+ :paramtype name: str
+ :keyword source: The source of the input.
+ :paramtype source: str
+ :keyword source_context: The source context used for selecting recursive inputs.
+ :paramtype source_context: str
+ :keyword inputs: The recursive inputs used when creating a complex type.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ """
super(InputFieldMappingEntry, self).__init__(**kwargs)
self.name = kwargs['name']
self.source = kwargs.get('source', None)
@@ -2549,18 +3311,18 @@ class KeepTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword keep_words: Required. The list of words to keep.
- :paramtype keep_words: list[str]
- :keyword lower_case_keep_words: A value indicating whether to lower case all words first.
- Default is false.
- :paramtype lower_case_keep_words: bool
+ :vartype name: str
+ :ivar keep_words: Required. The list of words to keep.
+ :vartype keep_words: list[str]
+ :ivar lower_case_keep_words: A value indicating whether to lower case all words first. Default
+ is false.
+ :vartype lower_case_keep_words: bool
"""
_validation = {
@@ -2580,6 +3342,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword keep_words: Required. The list of words to keep.
+ :paramtype keep_words: list[str]
+ :keyword lower_case_keep_words: A value indicating whether to lower case all words first.
+ Default is false.
+ :paramtype lower_case_keep_words: bool
+ """
super(KeepTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' # type: str
self.keep_words = kwargs['keep_words']
@@ -2591,38 +3364,37 @@ class KeyPhraseExtractionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl",
"pt-PT", "pt-BR", "ru", "es", "sv".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage
- :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent,
- all identified key phrases will be returned.
- :paramtype max_key_phrase_count: int
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :ivar max_key_phrase_count: A number indicating how many key phrases to return. If absent, all
+ identified key phrases will be returned.
+ :vartype max_key_phrase_count: int
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -2647,6 +3419,37 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl",
+ "pt-PT", "pt-BR", "ru", "es", "sv".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage
+ :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent,
+ all identified key phrases will be returned.
+ :paramtype max_key_phrase_count: int
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(KeyPhraseExtractionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -2659,18 +3462,18 @@ class KeywordMarkerTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword keywords: Required. A list of words to mark as keywords.
- :paramtype keywords: list[str]
- :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
- converted to lower case first. Default is false.
- :paramtype ignore_case: bool
+ :vartype name: str
+ :ivar keywords: Required. A list of words to mark as keywords.
+ :vartype keywords: list[str]
+ :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted
+ to lower case first. Default is false.
+ :vartype ignore_case: bool
"""
_validation = {
@@ -2690,6 +3493,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword keywords: Required. A list of words to mark as keywords.
+ :paramtype keywords: list[str]
+ :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
+ converted to lower case first. Default is false.
+ :paramtype ignore_case: bool
+ """
super(KeywordMarkerTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' # type: str
self.keywords = kwargs['keywords']
@@ -2701,15 +3515,15 @@ class KeywordTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword buffer_size: The read buffer size in bytes. Default is 256.
- :paramtype buffer_size: int
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar buffer_size: The read buffer size in bytes. Default is 256.
+ :vartype buffer_size: int
"""
_validation = {
@@ -2727,6 +3541,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword buffer_size: The read buffer size in bytes. Default is 256.
+ :paramtype buffer_size: int
+ """
super(KeywordTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' # type: str
self.buffer_size = kwargs.get('buffer_size', 256)
@@ -2737,16 +3559,16 @@ class KeywordTokenizerV2(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 256. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -2765,6 +3587,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(KeywordTokenizerV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' # type: str
self.max_token_length = kwargs.get('max_token_length', 256)
@@ -2775,33 +3606,32 @@ class LanguageDetectionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_country_hint: A country code to use as a hint to the language detection model
- if it cannot disambiguate the language.
- :paramtype default_country_hint: str
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_country_hint: A country code to use as a hint to the language detection model if
+ it cannot disambiguate the language.
+ :vartype default_country_hint: str
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -2825,6 +3655,32 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_country_hint: A country code to use as a hint to the language detection model
+ if it cannot disambiguate the language.
+ :paramtype default_country_hint: str
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(LanguageDetectionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' # type: str
self.default_country_hint = kwargs.get('default_country_hint', None)
@@ -2836,18 +3692,18 @@ class LengthTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be
- less than the value of max.
- :paramtype min_length: int
- :keyword max_length: The maximum length in characters. Default and maximum is 300.
- :paramtype max_length: int
+ :vartype name: str
+ :ivar min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less
+ than the value of max.
+ :vartype min_length: int
+ :ivar max_length: The maximum length in characters. Default and maximum is 300.
+ :vartype max_length: int
"""
_validation = {
@@ -2868,6 +3724,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be
+ less than the value of max.
+ :paramtype min_length: int
+ :keyword max_length: The maximum length in characters. Default and maximum is 300.
+ :paramtype max_length: int
+ """
super(LengthTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' # type: str
self.min_length = kwargs.get('min_length', 0)
@@ -2879,18 +3746,18 @@ class LimitTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword max_token_count: The maximum number of tokens to produce. Default is 1.
- :paramtype max_token_count: int
- :keyword consume_all_tokens: A value indicating whether all tokens from the input must be
- consumed even if maxTokenCount is reached. Default is false.
- :paramtype consume_all_tokens: bool
+ :vartype name: str
+ :ivar max_token_count: The maximum number of tokens to produce. Default is 1.
+ :vartype max_token_count: int
+ :ivar consume_all_tokens: A value indicating whether all tokens from the input must be consumed
+ even if maxTokenCount is reached. Default is false.
+ :vartype consume_all_tokens: bool
"""
_validation = {
@@ -2909,6 +3776,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_count: The maximum number of tokens to produce. Default is 1.
+ :paramtype max_token_count: int
+ :keyword consume_all_tokens: A value indicating whether all tokens from the input must be
+ consumed even if maxTokenCount is reached. Default is false.
+ :paramtype consume_all_tokens: bool
+ """
super(LimitTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' # type: str
self.max_token_count = kwargs.get('max_token_count', 1)
@@ -2938,6 +3816,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListDataSourcesResult, self).__init__(**kwargs)
self.data_sources = None
@@ -2965,6 +3845,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListIndexersResult, self).__init__(**kwargs)
self.indexers = None
@@ -2992,6 +3874,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListIndexesResult, self).__init__(**kwargs)
self.indexes = None
@@ -3019,6 +3903,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListSkillsetsResult, self).__init__(**kwargs)
self.skillsets = None
@@ -3046,6 +3932,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListSynonymMapsResult, self).__init__(**kwargs)
self.synonym_maps = None
@@ -3055,18 +3943,18 @@ class LuceneStandardAnalyzer(LexicalAnalyzer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
- :keyword stopwords: A list of stopwords.
- :paramtype stopwords: list[str]
+ :vartype max_token_length: int
+ :ivar stopwords: A list of stopwords.
+ :vartype stopwords: list[str]
"""
_validation = {
@@ -3086,6 +3974,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ :keyword stopwords: A list of stopwords.
+ :paramtype stopwords: list[str]
+ """
super(LuceneStandardAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
@@ -3097,16 +3996,16 @@ class LuceneStandardTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -3124,6 +4023,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split.
+ :paramtype max_token_length: int
+ """
super(LuceneStandardTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
@@ -3134,16 +4042,16 @@ class LuceneStandardTokenizerV2(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -3162,6 +4070,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(LuceneStandardTokenizerV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
@@ -3172,21 +4089,21 @@ class MagnitudeScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the magnitude scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
+ :ivar parameters: Required. Parameter values for the magnitude scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
"""
_validation = {
@@ -3208,6 +4125,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the magnitude scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
+ """
super(MagnitudeScoringFunction, self).__init__(**kwargs)
self.type = 'magnitude' # type: str
self.parameters = kwargs['parameters']
@@ -3218,13 +4149,13 @@ class MagnitudeScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword boosting_range_start: Required. The field value at which boosting starts.
- :paramtype boosting_range_start: float
- :keyword boosting_range_end: Required. The field value at which boosting ends.
- :paramtype boosting_range_end: float
- :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
+ :ivar boosting_range_start: Required. The field value at which boosting starts.
+ :vartype boosting_range_start: float
+ :ivar boosting_range_end: Required. The field value at which boosting ends.
+ :vartype boosting_range_end: float
+ :ivar should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
boost for field values beyond the range end value; default is false.
- :paramtype should_boost_beyond_range_by_constant: bool
+ :vartype should_boost_beyond_range_by_constant: bool
"""
_validation = {
@@ -3242,6 +4173,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword boosting_range_start: Required. The field value at which boosting starts.
+ :paramtype boosting_range_start: float
+ :keyword boosting_range_end: Required. The field value at which boosting ends.
+ :paramtype boosting_range_end: float
+ :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
+ boost for field values beyond the range end value; default is false.
+ :paramtype should_boost_beyond_range_by_constant: bool
+ """
super(MagnitudeScoringParameters, self).__init__(**kwargs)
self.boosting_range_start = kwargs['boosting_range_start']
self.boosting_range_end = kwargs['boosting_range_end']
@@ -3253,16 +4193,16 @@ class MappingCharFilter(CharFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all
- occurrences of the character "a" will be replaced with character "b").
- :paramtype mappings: list[str]
+ :vartype name: str
+ :ivar mappings: Required. A list of mappings of the following format: "a=>b" (all occurrences
+ of the character "a" will be replaced with character "b").
+ :vartype mappings: list[str]
"""
_validation = {
@@ -3281,6 +4221,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all
+ occurrences of the character "a" will be replaced with character "b").
+ :paramtype mappings: list[str]
+ """
super(MappingCharFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' # type: str
self.mappings = kwargs['mappings']
@@ -3291,32 +4240,31 @@ class MergeSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is
- an empty space.
- :paramtype insert_pre_tag: str
- :keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is
- an empty space.
- :paramtype insert_post_tag: str
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an
+ empty space.
+ :vartype insert_pre_tag: str
+ :ivar insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an
+ empty space.
+ :vartype insert_post_tag: str
"""
_validation = {
@@ -3340,6 +4288,31 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is
+ an empty space.
+ :paramtype insert_pre_tag: str
+ :keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is
+ an empty space.
+ :paramtype insert_post_tag: str
+ """
super(MergeSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.MergeSkill' # type: str
self.insert_pre_tag = kwargs.get('insert_pre_tag', " ")
@@ -3351,29 +4324,29 @@ class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
- :paramtype max_token_length: int
- :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
- as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
- :paramtype is_search_tokenizer: bool
- :keyword language: The language to use. The default is English. Possible values include:
- "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
+ :vartype max_token_length: int
+ :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as
+ the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :vartype is_search_tokenizer: bool
+ :ivar language: The language to use. The default is English. Possible values include: "arabic",
+ "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
"estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian",
"icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam",
"marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
"romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish",
"swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu".
- :paramtype language: str or
+ :vartype language: str or
~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage
"""
@@ -3395,6 +4368,29 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ split. Maximum token length that can be used is 300 characters. Tokens longer than 300
+ characters are first split into tokens of length 300 and then each of those tokens is split
+ based on the max token length set. Default is 255.
+ :paramtype max_token_length: int
+ :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
+ as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :paramtype is_search_tokenizer: bool
+ :keyword language: The language to use. The default is English. Possible values include:
+ "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
+ "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian",
+ "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam",
+ "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
+ "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish",
+ "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu".
+ :paramtype language: str or
+ ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage
+ """
super(MicrosoftLanguageStemmingTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
@@ -3407,29 +4403,29 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
- :paramtype max_token_length: int
- :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
- as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
- :paramtype is_search_tokenizer: bool
- :keyword language: The language to use. The default is English. Possible values include:
- "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian",
- "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi",
- "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam",
- "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
- "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish",
- "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese".
- :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
+ :vartype max_token_length: int
+ :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as
+ the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :vartype is_search_tokenizer: bool
+ :ivar language: The language to use. The default is English. Possible values include: "bangla",
+ "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech",
+ "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic",
+ "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi",
+ "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian",
+ "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil",
+ "telugu", "thai", "ukrainian", "urdu", "vietnamese".
+ :vartype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
"""
_validation = {
@@ -3450,6 +4446,28 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ split. Maximum token length that can be used is 300 characters. Tokens longer than 300
+ characters are first split into tokens of length 300 and then each of those tokens is split
+ based on the max token length set. Default is 255.
+ :paramtype max_token_length: int
+ :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
+ as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :paramtype is_search_tokenizer: bool
+ :keyword language: The language to use. The default is English. Possible values include:
+ "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian",
+ "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi",
+ "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam",
+ "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
+ "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish",
+ "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese".
+ :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
+ """
super(MicrosoftLanguageTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
@@ -3462,18 +4480,18 @@ class NGramTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2.
- :paramtype max_gram: int
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2.
+ :vartype max_gram: int
"""
_validation = {
@@ -3492,6 +4510,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2.
+ :paramtype max_gram: int
+ """
super(NGramTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' # type: str
self.min_gram = kwargs.get('min_gram', 1)
@@ -3503,18 +4532,18 @@ class NGramTokenFilterV2(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
"""
_validation = {
@@ -3535,6 +4564,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ """
super(NGramTokenFilterV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' # type: str
self.min_gram = kwargs.get('min_gram', 1)
@@ -3546,20 +4586,20 @@ class NGramTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
- :keyword token_chars: Character classes to keep in the tokens.
- :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
+ :ivar token_chars: Character classes to keep in the tokens.
+ :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
_validation = {
@@ -3581,6 +4621,19 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ :keyword token_chars: Character classes to keep in the tokens.
+ :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ """
super(NGramTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' # type: str
self.min_gram = kwargs.get('min_gram', 1)
@@ -3593,39 +4646,37 @@ class OcrSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el",
"hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl",
"sr-Latn", "sk".
- :paramtype default_language_code: str or
- ~azure.search.documents.indexes.models.OcrSkillLanguage
- :keyword should_detect_orientation: A value indicating to turn orientation detection on or not.
+ :vartype default_language_code: str or ~azure.search.documents.indexes.models.OcrSkillLanguage
+ :ivar should_detect_orientation: A value indicating to turn orientation detection on or not.
Default is false.
- :paramtype should_detect_orientation: bool
- :keyword line_ending: Defines the sequence of characters to use between the lines of text
+ :vartype should_detect_orientation: bool
+ :ivar line_ending: Defines the sequence of characters to use between the lines of text
recognized by the OCR skill. The default value is "space". Possible values include: "space",
"carriageReturn", "lineFeed", "carriageReturnLineFeed".
- :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
+ :vartype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
"""
_validation = {
@@ -3650,6 +4701,38 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el",
+ "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl",
+ "sr-Latn", "sk".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.OcrSkillLanguage
+ :keyword should_detect_orientation: A value indicating to turn orientation detection on or not.
+ Default is false.
+ :paramtype should_detect_orientation: bool
+ :keyword line_ending: Defines the sequence of characters to use between the lines of text
+ recognized by the OCR skill. The default value is "space". Possible values include: "space",
+ "carriageReturn", "lineFeed", "carriageReturnLineFeed".
+ :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
+ """
super(OcrSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -3662,10 +4745,10 @@ class OutputFieldMappingEntry(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the output defined by the skill.
- :paramtype name: str
- :keyword target_name: The target name of the output. It is optional and default to name.
- :paramtype target_name: str
+ :ivar name: Required. The name of the output defined by the skill.
+ :vartype name: str
+ :ivar target_name: The target name of the output. It is optional and default to name.
+ :vartype target_name: str
"""
_validation = {
@@ -3681,6 +4764,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the output defined by the skill.
+ :paramtype name: str
+ :keyword target_name: The target name of the output. It is optional and default to name.
+ :paramtype target_name: str
+ """
super(OutputFieldMappingEntry, self).__init__(**kwargs)
self.name = kwargs['name']
self.target_name = kwargs.get('target_name', None)
@@ -3691,24 +4780,24 @@ class PathHierarchyTokenizerV2(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword delimiter: The delimiter character to use. Default is "/".
- :paramtype delimiter: str
- :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/".
- :paramtype replacement: str
- :keyword max_token_length: The maximum token length. Default and maximum is 300.
- :paramtype max_token_length: int
- :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar delimiter: The delimiter character to use. Default is "/".
+ :vartype delimiter: str
+ :ivar replacement: A value that, if set, replaces the delimiter character. Default is "/".
+ :vartype replacement: str
+ :ivar max_token_length: The maximum token length. Default and maximum is 300.
+ :vartype max_token_length: int
+ :ivar reverse_token_order: A value indicating whether to generate tokens in reverse order.
Default is false.
- :paramtype reverse_token_order: bool
- :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
- :paramtype number_of_tokens_to_skip: int
+ :vartype reverse_token_order: bool
+ :ivar number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
+ :vartype number_of_tokens_to_skip: int
"""
_validation = {
@@ -3731,6 +4820,23 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword delimiter: The delimiter character to use. Default is "/".
+ :paramtype delimiter: str
+ :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/".
+ :paramtype replacement: str
+ :keyword max_token_length: The maximum token length. Default and maximum is 300.
+ :paramtype max_token_length: int
+ :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order.
+ Default is false.
+ :paramtype reverse_token_order: bool
+ :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
+ :paramtype number_of_tokens_to_skip: int
+ """
super(PathHierarchyTokenizerV2, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' # type: str
self.delimiter = kwargs.get('delimiter', "/")
@@ -3740,52 +4846,29 @@ def __init__(
self.number_of_tokens_to_skip = kwargs.get('number_of_tokens_to_skip', 0)
-class Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
- """Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema.
-
- :keyword document_keys: document keys to be reset.
- :paramtype document_keys: list[str]
- :keyword datasource_document_ids: datasource document identifiers to be reset.
- :paramtype datasource_document_ids: list[str]
- """
-
- _attribute_map = {
- 'document_keys': {'key': 'documentKeys', 'type': '[str]'},
- 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'},
- }
-
- def __init__(
- self,
- **kwargs
- ):
- super(Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
- self.document_keys = kwargs.get('document_keys', None)
- self.datasource_document_ids = kwargs.get('datasource_document_ids', None)
-
-
class PatternAnalyzer(LexicalAnalyzer):
"""Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is
true.
- :paramtype lower_case_terms: bool
- :keyword pattern: A regular expression pattern to match token separators. Default is an
- expression that matches one or more non-word characters.
- :paramtype pattern: str
- :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
- "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
- :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
- :keyword stopwords: A list of stopwords.
- :paramtype stopwords: list[str]
+ :vartype lower_case_terms: bool
+ :ivar pattern: A regular expression pattern to match token separators. Default is an expression
+ that matches one or more non-word characters.
+ :vartype pattern: str
+ :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE",
+ "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :ivar stopwords: A list of stopwords.
+ :vartype stopwords: list[str]
"""
_validation = {
@@ -3806,6 +4889,23 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is
+ true.
+ :paramtype lower_case_terms: bool
+ :keyword pattern: A regular expression pattern to match token separators. Default is an
+ expression that matches one or more non-word characters.
+ :paramtype pattern: str
+ :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
+ "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :keyword stopwords: A list of stopwords.
+ :paramtype stopwords: list[str]
+ """
super(PatternAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' # type: str
self.lower_case_terms = kwargs.get('lower_case_terms', True)
@@ -3819,18 +4919,18 @@ class PatternCaptureTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword patterns: Required. A list of patterns to match against each token.
- :paramtype patterns: list[str]
- :keyword preserve_original: A value indicating whether to return the original token even if one
- of the patterns matches. Default is true.
- :paramtype preserve_original: bool
+ :vartype name: str
+ :ivar patterns: Required. A list of patterns to match against each token.
+ :vartype patterns: list[str]
+ :ivar preserve_original: A value indicating whether to return the original token even if one of
+ the patterns matches. Default is true.
+ :vartype preserve_original: bool
"""
_validation = {
@@ -3850,6 +4950,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword patterns: Required. A list of patterns to match against each token.
+ :paramtype patterns: list[str]
+ :keyword preserve_original: A value indicating whether to return the original token even if one
+ of the patterns matches. Default is true.
+ :paramtype preserve_original: bool
+ """
super(PatternCaptureTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' # type: str
self.patterns = kwargs['patterns']
@@ -3861,17 +4972,17 @@ class PatternReplaceCharFilter(CharFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword pattern: Required. A regular expression pattern.
- :paramtype pattern: str
- :keyword replacement: Required. The replacement text.
- :paramtype replacement: str
+ :vartype name: str
+ :ivar pattern: Required. A regular expression pattern.
+ :vartype pattern: str
+ :ivar replacement: Required. The replacement text.
+ :vartype replacement: str
"""
_validation = {
@@ -3892,6 +5003,16 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword pattern: Required. A regular expression pattern.
+ :paramtype pattern: str
+ :keyword replacement: Required. The replacement text.
+ :paramtype replacement: str
+ """
super(PatternReplaceCharFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' # type: str
self.pattern = kwargs['pattern']
@@ -3903,17 +5024,17 @@ class PatternReplaceTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword pattern: Required. A regular expression pattern.
- :paramtype pattern: str
- :keyword replacement: Required. The replacement text.
- :paramtype replacement: str
+ :vartype name: str
+ :ivar pattern: Required. A regular expression pattern.
+ :vartype pattern: str
+ :ivar replacement: Required. The replacement text.
+ :vartype replacement: str
"""
_validation = {
@@ -3934,6 +5055,16 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword pattern: Required. A regular expression pattern.
+ :paramtype pattern: str
+ :keyword replacement: Required. The replacement text.
+ :paramtype replacement: str
+ """
super(PatternReplaceTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' # type: str
self.pattern = kwargs['pattern']
@@ -3945,23 +5076,23 @@ class PatternTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword pattern: A regular expression pattern to match token separators. Default is an
- expression that matches one or more non-word characters.
- :paramtype pattern: str
- :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
- "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
- :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
- :keyword group: The zero-based ordinal of the matching group in the regular expression pattern
- to extract into tokens. Use -1 if you want to use the entire pattern to split the input into
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar pattern: A regular expression pattern to match token separators. Default is an expression
+ that matches one or more non-word characters.
+ :vartype pattern: str
+ :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE",
+ "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :ivar group: The zero-based ordinal of the matching group in the regular expression pattern to
+ extract into tokens. Use -1 if you want to use the entire pattern to split the input into
tokens, irrespective of matching groups. Default is -1.
- :paramtype group: int
+ :vartype group: int
"""
_validation = {
@@ -3981,6 +5112,22 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword pattern: A regular expression pattern to match token separators. Default is an
+ expression that matches one or more non-word characters.
+ :paramtype pattern: str
+ :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
+ "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :keyword group: The zero-based ordinal of the matching group in the regular expression pattern
+ to extract into tokens. Use -1 if you want to use the entire pattern to split the input into
+ tokens, irrespective of matching groups. Default is -1.
+ :paramtype group: int
+ """
super(PatternTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' # type: str
self.pattern = kwargs.get('pattern', "\W+")
@@ -3993,20 +5140,20 @@ class PhoneticTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
+ :vartype name: str
+ :ivar encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
"metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2",
"cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse".
- :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
- :keyword replace_original_tokens: A value indicating whether encoded tokens should replace
+ :vartype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
+ :ivar replace_original_tokens: A value indicating whether encoded tokens should replace
original tokens. If false, encoded tokens are added as synonyms. Default is true.
- :paramtype replace_original_tokens: bool
+ :vartype replace_original_tokens: bool
"""
_validation = {
@@ -4025,6 +5172,19 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
+ "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2",
+ "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse".
+ :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
+ :keyword replace_original_tokens: A value indicating whether encoded tokens should replace
+ original tokens. If false, encoded tokens are added as synonyms. Default is true.
+ :paramtype replace_original_tokens: bool
+ """
super(PhoneticTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' # type: str
self.encoder = kwargs.get('encoder', None)
@@ -4036,48 +5196,47 @@ class PIIDetectionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+ :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
- :keyword masking_mode: A parameter that provides various ways to mask the personal information
+ :vartype minimum_precision: float
+ :ivar masking_mode: A parameter that provides various ways to mask the personal information
detected in the input text. Default is 'none'. Possible values include: "none", "replace".
- :paramtype masking_mode: str or
+ :vartype masking_mode: str or
~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode
- :keyword masking_character: The character used to mask the text if the maskingMode parameter is
+ :ivar masking_character: The character used to mask the text if the maskingMode parameter is
set to replace. Default is '*'.
- :paramtype masking_character: str
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
- :keyword pii_categories: A list of PII entity categories that should be extracted and masked.
- :paramtype pii_categories: list[str]
- :keyword domain: If specified, will set the PII domain to include only a subset of the entity
+ :vartype masking_character: str
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
+ :ivar pii_categories: A list of PII entity categories that should be extracted and masked.
+ :vartype pii_categories: list[str]
+ :ivar domain: If specified, will set the PII domain to include only a subset of the entity
categories. Possible values include: 'phi', 'none'. Default is 'none'.
- :paramtype domain: str
+ :vartype domain: str
"""
_validation = {
@@ -4108,6 +5267,47 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+ :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ :keyword masking_mode: A parameter that provides various ways to mask the personal information
+ detected in the input text. Default is 'none'. Possible values include: "none", "replace".
+ :paramtype masking_mode: str or
+ ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode
+ :keyword masking_character: The character used to mask the text if the maskingMode parameter is
+ set to replace. Default is '*'.
+ :paramtype masking_character: str
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ :keyword pii_categories: A list of PII entity categories that should be extracted and masked.
+ :paramtype pii_categories: list[str]
+ :keyword domain: If specified, will set the PII domain to include only a subset of the entity
+ categories. Possible values include: 'phi', 'none'. Default is 'none'.
+ :paramtype domain: str
+ """
super(PIIDetectionSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.PIIDetectionSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -4122,8 +5322,8 @@ def __init__(
class RequestOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
- :paramtype x_ms_client_request_id: str
+ :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :vartype x_ms_client_request_id: str
"""
_attribute_map = {
@@ -4134,6 +5334,10 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :paramtype x_ms_client_request_id: str
+ """
super(RequestOptions, self).__init__(**kwargs)
self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None)
@@ -4143,10 +5347,10 @@ class ResourceCounter(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword usage: Required. The resource usage amount.
- :paramtype usage: long
- :keyword quota: The resource amount quota.
- :paramtype quota: long
+ :ivar usage: Required. The resource usage amount.
+ :vartype usage: long
+ :ivar quota: The resource amount quota.
+ :vartype quota: long
"""
_validation = {
@@ -4162,6 +5366,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword usage: Required. The resource usage amount.
+ :paramtype usage: long
+ :keyword quota: The resource amount quota.
+ :paramtype quota: long
+ """
super(ResourceCounter, self).__init__(**kwargs)
self.usage = kwargs['usage']
self.quota = kwargs.get('quota', None)
@@ -4172,17 +5382,17 @@ class ScoringProfile(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the scoring profile.
- :paramtype name: str
- :keyword text_weights: Parameters that boost scoring based on text matches in certain index
+ :ivar name: Required. The name of the scoring profile.
+ :vartype name: str
+ :ivar text_weights: Parameters that boost scoring based on text matches in certain index
fields.
- :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights
- :keyword functions: The collection of functions that influence the scoring of documents.
- :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
- :keyword function_aggregation: A value indicating how the results of individual scoring
- functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions.
- Possible values include: "sum", "average", "minimum", "maximum", "firstMatching".
- :paramtype function_aggregation: str or
+ :vartype text_weights: ~azure.search.documents.indexes.models.TextWeights
+ :ivar functions: The collection of functions that influence the scoring of documents.
+ :vartype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
+ :ivar function_aggregation: A value indicating how the results of individual scoring functions
+ should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible
+ values include: "sum", "average", "minimum", "maximum", "firstMatching".
+ :vartype function_aggregation: str or
~azure.search.documents.indexes.models.ScoringFunctionAggregation
"""
@@ -4201,6 +5411,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the scoring profile.
+ :paramtype name: str
+ :keyword text_weights: Parameters that boost scoring based on text matches in certain index
+ fields.
+ :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights
+ :keyword functions: The collection of functions that influence the scoring of documents.
+ :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
+ :keyword function_aggregation: A value indicating how the results of individual scoring
+ functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions.
+ Possible values include: "sum", "average", "minimum", "maximum", "firstMatching".
+ :paramtype function_aggregation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionAggregation
+ """
super(ScoringProfile, self).__init__(**kwargs)
self.name = kwargs['name']
self.text_weights = kwargs.get('text_weights', None)
@@ -4239,6 +5463,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchError, self).__init__(**kwargs)
self.code = None
self.message = None
@@ -4250,43 +5476,43 @@ class SearchField(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the field, which must be unique within the fields
- collection of the index or parent field.
- :paramtype name: str
- :keyword type: Required. The data type of the field. Possible values include: "Edm.String",
+ :ivar name: Required. The name of the field, which must be unique within the fields collection
+ of the index or parent field.
+ :vartype name: str
+ :ivar type: Required. The data type of the field. Possible values include: "Edm.String",
"Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset",
"Edm.GeographyPoint", "Edm.ComplexType".
- :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
- :keyword key: A value indicating whether the field uniquely identifies documents in the index.
+ :vartype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
+ :ivar key: A value indicating whether the field uniquely identifies documents in the index.
Exactly one top-level field in each index must be chosen as the key field and it must be of
type Edm.String. Key fields can be used to look up documents directly and update or delete
specific documents. Default is false for simple fields and null for complex fields.
- :paramtype key: bool
- :keyword retrievable: A value indicating whether the field can be returned in a search result.
- You can disable this option if you want to use a field (for example, margin) as a filter,
- sorting, or scoring mechanism but do not want the field to be visible to the end user. This
- property must be true for key fields, and it must be null for complex fields. This property can
- be changed on existing fields. Enabling this property does not cause any increase in index
- storage requirements. Default is true for simple fields and null for complex fields.
- :paramtype retrievable: bool
- :keyword searchable: A value indicating whether the field is full-text searchable. This means
- it will undergo analysis such as word-breaking during indexing. If you set a searchable field
- to a value like "sunny day", internally it will be split into the individual tokens "sunny" and
+ :vartype key: bool
+ :ivar retrievable: A value indicating whether the field can be returned in a search result. You
+ can disable this option if you want to use a field (for example, margin) as a filter, sorting,
+ or scoring mechanism but do not want the field to be visible to the end user. This property
+ must be true for key fields, and it must be null for complex fields. This property can be
+ changed on existing fields. Enabling this property does not cause any increase in index storage
+ requirements. Default is true for simple fields and null for complex fields.
+ :vartype retrievable: bool
+ :ivar searchable: A value indicating whether the field is full-text searchable. This means it
+ will undergo analysis such as word-breaking during indexing. If you set a searchable field to a
+ value like "sunny day", internally it will be split into the individual tokens "sunny" and
"day". This enables full-text searches for these terms. Fields of type Edm.String or
Collection(Edm.String) are searchable by default. This property must be false for simple fields
of other non-string data types, and it must be null for complex fields. Note: searchable fields
consume extra space in your index since Azure Cognitive Search will store an additional
tokenized version of the field value for full-text searches. If you want to save space in your
index and you don't need a field to be included in searches, set searchable to false.
- :paramtype searchable: bool
- :keyword filterable: A value indicating whether to enable the field to be referenced in $filter
+ :vartype searchable: bool
+ :ivar filterable: A value indicating whether to enable the field to be referenced in $filter
queries. filterable differs from searchable in how strings are handled. Fields of type
Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so
comparisons are for exact matches only. For example, if you set such a field f to "sunny day",
$filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property
must be null for complex fields. Default is true for simple fields and null for complex fields.
- :paramtype filterable: bool
- :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
+ :vartype filterable: bool
+ :ivar sortable: A value indicating whether to enable the field to be referenced in $orderby
expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
users will want to sort by fields in the documents. A simple field can be sortable only if it
is single-valued (it has a single value in the scope of the parent document). Simple collection
@@ -4296,15 +5522,15 @@ class SearchField(msrest.serialization.Model):
cannot be sortable and the sortable property must be null for such fields. The default for
sortable is true for single-valued simple fields, false for multi-valued simple fields, and
null for complex fields.
- :paramtype sortable: bool
- :keyword facetable: A value indicating whether to enable the field to be referenced in facet
+ :vartype sortable: bool
+ :ivar facetable: A value indicating whether to enable the field to be referenced in facet
queries. Typically used in a presentation of search results that includes hit count by category
(for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or
Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple
fields.
- :paramtype facetable: bool
- :keyword analyzer: The name of the analyzer to use for the field. This option can be used only
+ :vartype facetable: bool
+ :ivar analyzer: The name of the analyzer to use for the field. This option can be used only
with searchable fields and it can't be set together with either searchAnalyzer or
indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null
for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
@@ -4324,11 +5550,11 @@ class SearchField(msrest.serialization.Model):
"th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
"vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
"simple", "stop", "whitespace".
- :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword search_analyzer: The name of the analyzer used at search time for the field. This
- option can be used only with searchable fields. It must be set together with indexAnalyzer and
- it cannot be set together with the analyzer option. This property cannot be set to the name of
- a language analyzer; use the analyzer property instead if you need a language analyzer. This
+ :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar search_analyzer: The name of the analyzer used at search time for the field. This option
+ can be used only with searchable fields. It must be set together with indexAnalyzer and it
+ cannot be set together with the analyzer option. This property cannot be set to the name of a
+ language analyzer; use the analyzer property instead if you need a language analyzer. This
analyzer can be updated on an existing field. Must be null for complex fields. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
@@ -4347,12 +5573,12 @@ class SearchField(msrest.serialization.Model):
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
- :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This
- option can be used only with searchable fields. It must be set together with searchAnalyzer and
- it cannot be set together with the analyzer option. This property cannot be set to the name of
- a language analyzer; use the analyzer property instead if you need a language analyzer. Once
- the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
+ :vartype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar index_analyzer: The name of the analyzer used at indexing time for the field. This option
+ can be used only with searchable fields. It must be set together with searchAnalyzer and it
+ cannot be set together with the analyzer option. This property cannot be set to the name of a
+ language analyzer; use the analyzer property instead if you need a language analyzer. Once the
+ analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene",
"bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft",
"zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft",
@@ -4370,21 +5596,21 @@ class SearchField(msrest.serialization.Model):
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
- :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword normalizer: The name of the normalizer to use for the field. This option can be used
- only with fields with filterable, sortable, or facetable enabled. Once the normalizer is
- chosen, it cannot be changed for the field. Must be null for complex fields. Possible values
- include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
- :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
- :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This
+ :vartype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar normalizer: The name of the normalizer to use for the field. This option can be used only
+ with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it
+ cannot be changed for the field. Must be null for complex fields. Possible values include:
+ "asciifolding", "elision", "lowercase", "standard", "uppercase".
+ :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :ivar synonym_maps: A list of the names of synonym maps to associate with this field. This
option can be used only with searchable fields. Currently only one synonym map per field is
supported. Assigning a synonym map to a field ensures that query terms targeting that field are
expanded at query-time using the rules in the synonym map. This attribute can be changed on
existing fields. Must be null or an empty collection for complex fields.
- :paramtype synonym_maps: list[str]
- :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or
+ :vartype synonym_maps: list[str]
+ :ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or
Collection(Edm.ComplexType). Must be null or empty for simple fields.
- :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
+ :vartype fields: list[~azure.search.documents.indexes.models.SearchField]
"""
_validation = {
@@ -4413,6 +5639,143 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the field, which must be unique within the fields
+ collection of the index or parent field.
+ :paramtype name: str
+ :keyword type: Required. The data type of the field. Possible values include: "Edm.String",
+ "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset",
+ "Edm.GeographyPoint", "Edm.ComplexType".
+ :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
+ :keyword key: A value indicating whether the field uniquely identifies documents in the index.
+ Exactly one top-level field in each index must be chosen as the key field and it must be of
+ type Edm.String. Key fields can be used to look up documents directly and update or delete
+ specific documents. Default is false for simple fields and null for complex fields.
+ :paramtype key: bool
+ :keyword retrievable: A value indicating whether the field can be returned in a search result.
+ You can disable this option if you want to use a field (for example, margin) as a filter,
+ sorting, or scoring mechanism but do not want the field to be visible to the end user. This
+ property must be true for key fields, and it must be null for complex fields. This property can
+ be changed on existing fields. Enabling this property does not cause any increase in index
+ storage requirements. Default is true for simple fields and null for complex fields.
+ :paramtype retrievable: bool
+ :keyword searchable: A value indicating whether the field is full-text searchable. This means
+ it will undergo analysis such as word-breaking during indexing. If you set a searchable field
+ to a value like "sunny day", internally it will be split into the individual tokens "sunny" and
+ "day". This enables full-text searches for these terms. Fields of type Edm.String or
+ Collection(Edm.String) are searchable by default. This property must be false for simple fields
+ of other non-string data types, and it must be null for complex fields. Note: searchable fields
+ consume extra space in your index since Azure Cognitive Search will store an additional
+ tokenized version of the field value for full-text searches. If you want to save space in your
+ index and you don't need a field to be included in searches, set searchable to false.
+ :paramtype searchable: bool
+ :keyword filterable: A value indicating whether to enable the field to be referenced in $filter
+ queries. filterable differs from searchable in how strings are handled. Fields of type
+ Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so
+ comparisons are for exact matches only. For example, if you set such a field f to "sunny day",
+ $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property
+ must be null for complex fields. Default is true for simple fields and null for complex fields.
+ :paramtype filterable: bool
+ :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
+ expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
+ users will want to sort by fields in the documents. A simple field can be sortable only if it
+ is single-valued (it has a single value in the scope of the parent document). Simple collection
+ fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex
+ collections are also multi-valued, and therefore cannot be sortable. This is true whether it's
+ an immediate parent field, or an ancestor field, that's the complex collection. Complex fields
+ cannot be sortable and the sortable property must be null for such fields. The default for
+ sortable is true for single-valued simple fields, false for multi-valued simple fields, and
+ null for complex fields.
+ :paramtype sortable: bool
+ :keyword facetable: A value indicating whether to enable the field to be referenced in facet
+ queries. Typically used in a presentation of search results that includes hit count by category
+ (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
+ on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or
+ Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple
+ fields.
+ :paramtype facetable: bool
+ :keyword analyzer: The name of the analyzer to use for the field. This option can be used only
+ with searchable fields and it can't be set together with either searchAnalyzer or
+ indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null
+ for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
+ "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene",
+ "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft",
+ "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene",
+ "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft",
+ "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene",
+ "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene",
+ "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene",
+ "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft",
+ "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft",
+ "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene",
+ "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft",
+ "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft",
+ "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft",
+ "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
+ "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
+ "simple", "stop", "whitespace".
+ :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword search_analyzer: The name of the analyzer used at search time for the field. This
+ option can be used only with searchable fields. It must be set together with indexAnalyzer and
+ it cannot be set together with the analyzer option. This property cannot be set to the name of
+ a language analyzer; use the analyzer property instead if you need a language analyzer. This
+ analyzer can be updated on an existing field. Must be null for complex fields. Possible values
+ include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
+ "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
+ "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
+ "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
+ "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
+ "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
+ "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
+ "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
+ "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
+ "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
+ "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
+ "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
+ "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
+ "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
+ "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
+ "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
+ "whitespace".
+ :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This
+ option can be used only with searchable fields. It must be set together with searchAnalyzer and
+ it cannot be set together with the analyzer option. This property cannot be set to the name of
+ a language analyzer; use the analyzer property instead if you need a language analyzer. Once
+ the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
+ Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene",
+ "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft",
+ "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft",
+ "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft",
+ "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene",
+ "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft",
+ "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft",
+ "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft",
+ "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene",
+ "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene",
+ "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
+ "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
+ "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
+ "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
+ "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
+ "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
+ "whitespace".
+ :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword normalizer: The name of the normalizer to use for the field. This option can be used
+ only with fields with filterable, sortable, or facetable enabled. Once the normalizer is
+ chosen, it cannot be changed for the field. Must be null for complex fields. Possible values
+ include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
+ :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This
+ option can be used only with searchable fields. Currently only one synonym map per field is
+ supported. Assigning a synonym map to a field ensures that query terms targeting that field are
+ expanded at query-time using the rules in the synonym map. This attribute can be changed on
+ existing fields. Must be null or an empty collection for complex fields.
+ :paramtype synonym_maps: list[str]
+ :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or
+ Collection(Edm.ComplexType). Must be null or empty for simple fields.
+ :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
+ """
super(SearchField, self).__init__(**kwargs)
self.name = kwargs['name']
self.type = kwargs['type']
@@ -4435,31 +5798,31 @@ class SearchIndex(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the index.
- :paramtype name: str
- :keyword fields: Required. The fields of the index.
- :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
- :keyword scoring_profiles: The scoring profiles for the index.
- :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
- :keyword default_scoring_profile: The name of the scoring profile to use if none is specified
- in the query. If this property is not set and no scoring profile is specified in the query,
- then default scoring (tf-idf) will be used.
- :paramtype default_scoring_profile: str
- :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
- :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions
- :keyword suggesters: The suggesters for the index.
- :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester]
- :keyword analyzers: The analyzers for the index.
- :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
- :keyword tokenizers: The tokenizers for the index.
- :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
- :keyword token_filters: The token filters for the index.
- :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
- :keyword char_filters: The character filters for the index.
- :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
- :keyword normalizers: The normalizers for the index.
- :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :ivar name: Required. The name of the index.
+ :vartype name: str
+ :ivar fields: Required. The fields of the index.
+ :vartype fields: list[~azure.search.documents.indexes.models.SearchField]
+ :ivar scoring_profiles: The scoring profiles for the index.
+ :vartype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
+ :ivar default_scoring_profile: The name of the scoring profile to use if none is specified in
+ the query. If this property is not set and no scoring profile is specified in the query, then
+ default scoring (tf-idf) will be used.
+ :vartype default_scoring_profile: str
+ :ivar cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
+ :vartype cors_options: ~azure.search.documents.indexes.models.CorsOptions
+ :ivar suggesters: The suggesters for the index.
+ :vartype suggesters: list[~azure.search.documents.indexes.models.Suggester]
+ :ivar analyzers: The analyzers for the index.
+ :vartype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
+ :ivar tokenizers: The tokenizers for the index.
+ :vartype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
+ :ivar token_filters: The token filters for the index.
+ :vartype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
+ :ivar char_filters: The character filters for the index.
+ :vartype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
+ :ivar normalizers: The normalizers for the index.
+ :vartype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
@@ -4467,14 +5830,14 @@ class SearchIndex(msrest.serialization.Model):
needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
- :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :ivar similarity: The type of similarity algorithm to be used when scoring and ranking the
documents matching a search query. The similarity algorithm can only be defined at index
creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity
algorithm is used.
- :paramtype similarity: ~azure.search.documents.indexes.models.Similarity
- :keyword e_tag: The ETag of the index.
- :paramtype e_tag: str
+ :vartype similarity: ~azure.search.documents.indexes.models.Similarity
+ :ivar e_tag: The ETag of the index.
+ :vartype e_tag: str
"""
_validation = {
@@ -4503,6 +5866,48 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the index.
+ :paramtype name: str
+ :keyword fields: Required. The fields of the index.
+ :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
+ :keyword scoring_profiles: The scoring profiles for the index.
+ :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
+ :keyword default_scoring_profile: The name of the scoring profile to use if none is specified
+ in the query. If this property is not set and no scoring profile is specified in the query,
+ then default scoring (tf-idf) will be used.
+ :paramtype default_scoring_profile: str
+ :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
+ :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions
+ :keyword suggesters: The suggesters for the index.
+ :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester]
+ :keyword analyzers: The analyzers for the index.
+ :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
+ :keyword tokenizers: The tokenizers for the index.
+ :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
+ :keyword token_filters: The token filters for the index.
+ :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
+ :keyword char_filters: The character filters for the index.
+ :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
+ :keyword normalizers: The normalizers for the index.
+ :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your data when you
+ want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
+ Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
+ Search will ignore attempts to set this property to null. You can change this property as
+ needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
+ customer-managed keys is not available for free search services, and is only available for paid
+ services created on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the
+ documents matching a search query. The similarity algorithm can only be defined at index
+ creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity
+ algorithm is used.
+ :paramtype similarity: ~azure.search.documents.indexes.models.Similarity
+ :keyword e_tag: The ETag of the index.
+ :paramtype e_tag: str
+ """
super(SearchIndex, self).__init__(**kwargs)
self.name = kwargs['name']
self.fields = kwargs['fields']
@@ -4525,32 +5930,32 @@ class SearchIndexer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the indexer.
- :paramtype name: str
- :keyword description: The description of the indexer.
- :paramtype description: str
- :keyword data_source_name: Required. The name of the datasource from which this indexer reads
+ :ivar name: Required. The name of the indexer.
+ :vartype name: str
+ :ivar description: The description of the indexer.
+ :vartype description: str
+ :ivar data_source_name: Required. The name of the datasource from which this indexer reads
data.
- :paramtype data_source_name: str
- :keyword skillset_name: The name of the skillset executing with this indexer.
- :paramtype skillset_name: str
- :keyword target_index_name: Required. The name of the index to which this indexer writes data.
- :paramtype target_index_name: str
- :keyword schedule: The schedule for this indexer.
- :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
- :keyword parameters: Parameters for indexer execution.
- :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters
- :keyword field_mappings: Defines mappings between fields in the data source and corresponding
+ :vartype data_source_name: str
+ :ivar skillset_name: The name of the skillset executing with this indexer.
+ :vartype skillset_name: str
+ :ivar target_index_name: Required. The name of the index to which this indexer writes data.
+ :vartype target_index_name: str
+ :ivar schedule: The schedule for this indexer.
+ :vartype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
+ :ivar parameters: Parameters for indexer execution.
+ :vartype parameters: ~azure.search.documents.indexes.models.IndexingParameters
+ :ivar field_mappings: Defines mappings between fields in the data source and corresponding
target fields in the index.
- :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
- :keyword output_field_mappings: Output field mappings are applied after enrichment and
- immediately before indexing.
- :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
- :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false.
- :paramtype is_disabled: bool
- :keyword e_tag: The ETag of the indexer.
- :paramtype e_tag: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :vartype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :ivar output_field_mappings: Output field mappings are applied after enrichment and immediately
+ before indexing.
+ :vartype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :ivar is_disabled: A value indicating whether the indexer is disabled. Default is false.
+ :vartype is_disabled: bool
+ :ivar e_tag: The ETag of the indexer.
+ :vartype e_tag: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your indexer
definition (as well as indexer execution status) when you want full assurance that no one, not
even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your
@@ -4559,10 +5964,10 @@ class SearchIndexer(msrest.serialization.Model):
rotate your encryption key; Your indexer definition (and indexer execution status) will be
unaffected. Encryption with customer-managed keys is not available for free search services,
and is only available for paid services created on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
- :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification
- steps without having to rebuild the index every time.
- :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification steps
+ without having to rebuild the index every time.
+ :vartype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
"""
_validation = {
@@ -4591,6 +5996,46 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the indexer.
+ :paramtype name: str
+ :keyword description: The description of the indexer.
+ :paramtype description: str
+ :keyword data_source_name: Required. The name of the datasource from which this indexer reads
+ data.
+ :paramtype data_source_name: str
+ :keyword skillset_name: The name of the skillset executing with this indexer.
+ :paramtype skillset_name: str
+ :keyword target_index_name: Required. The name of the index to which this indexer writes data.
+ :paramtype target_index_name: str
+ :keyword schedule: The schedule for this indexer.
+ :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
+ :keyword parameters: Parameters for indexer execution.
+ :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters
+ :keyword field_mappings: Defines mappings between fields in the data source and corresponding
+ target fields in the index.
+ :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :keyword output_field_mappings: Output field mappings are applied after enrichment and
+ immediately before indexing.
+ :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false.
+ :paramtype is_disabled: bool
+ :keyword e_tag: The ETag of the indexer.
+ :paramtype e_tag: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your indexer
+ definition (as well as indexer execution status) when you want full assurance that no one, not
+ even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your
+ indexer definition, it will always remain encrypted. Azure Cognitive Search will ignore
+ attempts to set this property to null. You can change this property as needed if you want to
+ rotate your encryption key; Your indexer definition (and indexer execution status) will be
+ unaffected. Encryption with customer-managed keys is not available for free search services,
+ and is only available for paid services created on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification
+ steps without having to rebuild the index every time.
+ :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
+ """
super(SearchIndexer, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
@@ -4610,11 +6055,11 @@ def __init__(
class SearchIndexerCache(msrest.serialization.Model):
"""SearchIndexerCache.
- :keyword storage_connection_string: The connection string to the storage account where the
- cache data will be persisted.
- :paramtype storage_connection_string: str
- :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled.
- :paramtype enable_reprocessing: bool
+ :ivar storage_connection_string: The connection string to the storage account where the cache
+ data will be persisted.
+ :vartype storage_connection_string: str
+ :ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled.
+ :vartype enable_reprocessing: bool
"""
_attribute_map = {
@@ -4626,6 +6071,13 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword storage_connection_string: The connection string to the storage account where the
+ cache data will be persisted.
+ :paramtype storage_connection_string: str
+ :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled.
+ :paramtype enable_reprocessing: bool
+ """
super(SearchIndexerCache, self).__init__(**kwargs)
self.storage_connection_string = kwargs.get('storage_connection_string', None)
self.enable_reprocessing = kwargs.get('enable_reprocessing', None)
@@ -4636,12 +6088,12 @@ class SearchIndexerDataContainer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the table or view (for Azure SQL data source) or
- collection (for CosmosDB data source) that will be indexed.
- :paramtype name: str
- :keyword query: A query that is applied to this data container. The syntax and meaning of this
+ :ivar name: Required. The name of the table or view (for Azure SQL data source) or collection
+ (for CosmosDB data source) that will be indexed.
+ :vartype name: str
+ :ivar query: A query that is applied to this data container. The syntax and meaning of this
parameter is datasource-specific. Not supported by Azure SQL datasources.
- :paramtype query: str
+ :vartype query: str
"""
_validation = {
@@ -4657,6 +6109,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the table or view (for Azure SQL data source) or
+ collection (for CosmosDB data source) that will be indexed.
+ :paramtype name: str
+ :keyword query: A query that is applied to this data container. The syntax and meaning of this
+ parameter is datasource-specific. Not supported by Azure SQL datasources.
+ :paramtype query: str
+ """
super(SearchIndexerDataContainer, self).__init__(**kwargs)
self.name = kwargs['name']
self.query = kwargs.get('query', None)
@@ -4670,9 +6130,9 @@ class SearchIndexerDataIdentity(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by
server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -4691,6 +6151,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerDataIdentity, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -4700,9 +6162,9 @@ class SearchIndexerDataNoneIdentity(SearchIndexerDataIdentity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by
server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -4717,6 +6179,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerDataNoneIdentity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataNoneIdentity' # type: str
@@ -4726,31 +6190,31 @@ class SearchIndexerDataSource(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the datasource.
- :paramtype name: str
- :keyword description: The description of the datasource.
- :paramtype description: str
- :keyword type: Required. The type of the datasource. Possible values include: "azuresql",
+ :ivar name: Required. The name of the datasource.
+ :vartype name: str
+ :ivar description: The description of the datasource.
+ :vartype description: str
+ :ivar type: Required. The type of the datasource. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
- :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
- :keyword credentials: Required. Credentials for the datasource.
- :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
- :keyword container: Required. The data container for the datasource.
- :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
- :keyword identity: An explicit managed identity to use for this datasource. If not specified
- and the connection string is a managed identity, the system-assigned managed identity is used.
- If not specified, the value remains unchanged. If "none" is specified, the value of this
- property is cleared.
- :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
- :keyword data_change_detection_policy: The data change detection policy for the datasource.
- :paramtype data_change_detection_policy:
+ :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
+ :ivar credentials: Required. Credentials for the datasource.
+ :vartype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
+ :ivar container: Required. The data container for the datasource.
+ :vartype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
+ :ivar identity: An explicit managed identity to use for this datasource. If not specified and
+ the connection string is a managed identity, the system-assigned managed identity is used. If
+ not specified, the value remains unchanged. If "none" is specified, the value of this property
+ is cleared.
+ :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ :ivar data_change_detection_policy: The data change detection policy for the datasource.
+ :vartype data_change_detection_policy:
~azure.search.documents.indexes.models.DataChangeDetectionPolicy
- :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource.
- :paramtype data_deletion_detection_policy:
+ :ivar data_deletion_detection_policy: The data deletion detection policy for the datasource.
+ :vartype data_deletion_detection_policy:
~azure.search.documents.indexes.models.DataDeletionDetectionPolicy
- :keyword e_tag: The ETag of the data source.
- :paramtype e_tag: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :ivar e_tag: The ETag of the data source.
+ :vartype e_tag: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your datasource
definition when you want full assurance that no one, not even Microsoft, can decrypt your data
source definition in Azure Cognitive Search. Once you have encrypted your data source
@@ -4759,7 +6223,7 @@ class SearchIndexerDataSource(msrest.serialization.Model):
encryption key; Your datasource definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
_validation = {
@@ -4786,6 +6250,42 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the datasource.
+ :paramtype name: str
+ :keyword description: The description of the datasource.
+ :paramtype description: str
+ :keyword type: Required. The type of the datasource. Possible values include: "azuresql",
+ "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
+ :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
+ :keyword credentials: Required. Credentials for the datasource.
+ :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
+ :keyword container: Required. The data container for the datasource.
+ :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
+ :keyword identity: An explicit managed identity to use for this datasource. If not specified
+ and the connection string is a managed identity, the system-assigned managed identity is used.
+ If not specified, the value remains unchanged. If "none" is specified, the value of this
+ property is cleared.
+ :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ :keyword data_change_detection_policy: The data change detection policy for the datasource.
+ :paramtype data_change_detection_policy:
+ ~azure.search.documents.indexes.models.DataChangeDetectionPolicy
+ :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource.
+ :paramtype data_deletion_detection_policy:
+ ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy
+ :keyword e_tag: The ETag of the data source.
+ :paramtype e_tag: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your datasource
+ definition when you want full assurance that no one, not even Microsoft, can decrypt your data
+ source definition in Azure Cognitive Search. Once you have encrypted your data source
+ definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
+ this property to null. You can change this property as needed if you want to rotate your
+ encryption key; Your datasource definition will be unaffected. Encryption with customer-managed
+ keys is not available for free search services, and is only available for paid services created
+ on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ """
super(SearchIndexerDataSource, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
@@ -4804,14 +6304,14 @@ class SearchIndexerDataUserAssignedIdentity(SearchIndexerDataIdentity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by
server.
- :paramtype odata_type: str
- :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user
+ :vartype odata_type: str
+ :ivar user_assigned_identity: Required. The fully qualified Azure resource Id of a user
assigned managed identity typically in the form
"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId"
that should have been assigned to the search service.
- :paramtype user_assigned_identity: str
+ :vartype user_assigned_identity: str
"""
_validation = {
@@ -4828,6 +6328,13 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user
+ assigned managed identity typically in the form
+ "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId"
+ that should have been assigned to the search service.
+ :paramtype user_assigned_identity: str
+ """
super(SearchIndexerDataUserAssignedIdentity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataUserAssignedIdentity' # type: str
self.user_assigned_identity = kwargs['user_assigned_identity']
@@ -4883,6 +6390,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerError, self).__init__(**kwargs)
self.key = None
self.error_message = None
@@ -4897,11 +6406,11 @@ class SearchIndexerKnowledgeStore(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword storage_connection_string: Required. The connection string to the storage account
+ :ivar storage_connection_string: Required. The connection string to the storage account
projections will be stored in.
- :paramtype storage_connection_string: str
- :keyword projections: Required. A list of additional projections to perform during indexing.
- :paramtype projections:
+ :vartype storage_connection_string: str
+ :ivar projections: Required. A list of additional projections to perform during indexing.
+ :vartype projections:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection]
"""
@@ -4919,6 +6428,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword storage_connection_string: Required. The connection string to the storage account
+ projections will be stored in.
+ :paramtype storage_connection_string: str
+ :keyword projections: Required. A list of additional projections to perform during indexing.
+ :paramtype projections:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection]
+ """
super(SearchIndexerKnowledgeStore, self).__init__(**kwargs)
self.storage_connection_string = kwargs['storage_connection_string']
self.projections = kwargs['projections']
@@ -4927,16 +6444,16 @@ def __init__(
class SearchIndexerKnowledgeStoreProjectionSelector(msrest.serialization.Model):
"""Abstract class to share properties between concrete selectors.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
_attribute_map = {
@@ -4951,6 +6468,18 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ """
super(SearchIndexerKnowledgeStoreProjectionSelector, self).__init__(**kwargs)
self.reference_key_name = kwargs.get('reference_key_name', None)
self.generated_key_name = kwargs.get('generated_key_name', None)
@@ -4964,18 +6493,18 @@ class SearchIndexerKnowledgeStoreBlobProjectionSelector(SearchIndexerKnowledgeSt
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword storage_container: Required. Blob container to store projections in.
- :paramtype storage_container: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar storage_container: Required. Blob container to store projections in.
+ :vartype storage_container: str
"""
_validation = {
@@ -4995,6 +6524,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword storage_container: Required. Blob container to store projections in.
+ :paramtype storage_container: str
+ """
super(SearchIndexerKnowledgeStoreBlobProjectionSelector, self).__init__(**kwargs)
self.storage_container = kwargs['storage_container']
@@ -5004,18 +6547,18 @@ class SearchIndexerKnowledgeStoreFileProjectionSelector(SearchIndexerKnowledgeSt
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword storage_container: Required. Blob container to store projections in.
- :paramtype storage_container: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar storage_container: Required. Blob container to store projections in.
+ :vartype storage_container: str
"""
_validation = {
@@ -5035,6 +6578,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword storage_container: Required. Blob container to store projections in.
+ :paramtype storage_container: str
+ """
super(SearchIndexerKnowledgeStoreFileProjectionSelector, self).__init__(**kwargs)
@@ -5043,18 +6600,18 @@ class SearchIndexerKnowledgeStoreObjectProjectionSelector(SearchIndexerKnowledge
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword storage_container: Required. Blob container to store projections in.
- :paramtype storage_container: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar storage_container: Required. Blob container to store projections in.
+ :vartype storage_container: str
"""
_validation = {
@@ -5074,20 +6631,34 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword storage_container: Required. Blob container to store projections in.
+ :paramtype storage_container: str
+ """
super(SearchIndexerKnowledgeStoreObjectProjectionSelector, self).__init__(**kwargs)
class SearchIndexerKnowledgeStoreProjection(msrest.serialization.Model):
"""Container object for various projection selectors.
- :keyword tables: Projections to Azure Table storage.
- :paramtype tables:
+ :ivar tables: Projections to Azure Table storage.
+ :vartype tables:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector]
- :keyword objects: Projections to Azure Blob storage.
- :paramtype objects:
+ :ivar objects: Projections to Azure Blob storage.
+ :vartype objects:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector]
- :keyword files: Projections to Azure File storage.
- :paramtype files:
+ :ivar files: Projections to Azure File storage.
+ :vartype files:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector]
"""
@@ -5101,6 +6672,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword tables: Projections to Azure Table storage.
+ :paramtype tables:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector]
+ :keyword objects: Projections to Azure Blob storage.
+ :paramtype objects:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector]
+ :keyword files: Projections to Azure File storage.
+ :paramtype files:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector]
+ """
super(SearchIndexerKnowledgeStoreProjection, self).__init__(**kwargs)
self.tables = kwargs.get('tables', None)
self.objects = kwargs.get('objects', None)
@@ -5112,18 +6694,18 @@ class SearchIndexerKnowledgeStoreTableProjectionSelector(SearchIndexerKnowledgeS
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword table_name: Required. Name of the Azure table to store projected data in.
- :paramtype table_name: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar table_name: Required. Name of the Azure table to store projected data in.
+ :vartype table_name: str
"""
_validation = {
@@ -5143,6 +6725,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword table_name: Required. Name of the Azure table to store projected data in.
+ :paramtype table_name: str
+ """
super(SearchIndexerKnowledgeStoreTableProjectionSelector, self).__init__(**kwargs)
self.table_name = kwargs['table_name']
@@ -5179,6 +6775,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerLimits, self).__init__(**kwargs)
self.max_run_time = None
self.max_document_extraction_size = None
@@ -5190,22 +6788,22 @@ class SearchIndexerSkillset(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the skillset.
- :paramtype name: str
- :keyword description: The description of the skillset.
- :paramtype description: str
- :keyword skills: Required. A list of skills in the skillset.
- :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
- :keyword cognitive_services_account: Details about cognitive services to be used when running
+ :ivar name: Required. The name of the skillset.
+ :vartype name: str
+ :ivar description: The description of the skillset.
+ :vartype description: str
+ :ivar skills: Required. A list of skills in the skillset.
+ :vartype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
+ :ivar cognitive_services_account: Details about cognitive services to be used when running
skills.
- :paramtype cognitive_services_account:
+ :vartype cognitive_services_account:
~azure.search.documents.indexes.models.CognitiveServicesAccount
- :keyword knowledge_store: Definition of additional projections to azure blob, table, or files,
- of enriched data.
- :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
- :keyword e_tag: The ETag of the skillset.
- :paramtype e_tag: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :ivar knowledge_store: Definition of additional projections to azure blob, table, or files, of
+ enriched data.
+ :vartype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
+ :ivar e_tag: The ETag of the skillset.
+ :vartype e_tag: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your skillset
definition when you want full assurance that no one, not even Microsoft, can decrypt your
skillset definition in Azure Cognitive Search. Once you have encrypted your skillset
@@ -5214,7 +6812,7 @@ class SearchIndexerSkillset(msrest.serialization.Model):
encryption key; Your skillset definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
_validation = {
@@ -5236,6 +6834,33 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the skillset.
+ :paramtype name: str
+ :keyword description: The description of the skillset.
+ :paramtype description: str
+ :keyword skills: Required. A list of skills in the skillset.
+ :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
+ :keyword cognitive_services_account: Details about cognitive services to be used when running
+ skills.
+ :paramtype cognitive_services_account:
+ ~azure.search.documents.indexes.models.CognitiveServicesAccount
+ :keyword knowledge_store: Definition of additional projections to azure blob, table, or files,
+ of enriched data.
+ :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
+ :keyword e_tag: The ETag of the skillset.
+ :paramtype e_tag: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your skillset
+ definition when you want full assurance that no one, not even Microsoft, can decrypt your
+ skillset definition in Azure Cognitive Search. Once you have encrypted your skillset
+ definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
+ this property to null. You can change this property as needed if you want to rotate your
+ encryption key; Your skillset definition will be unaffected. Encryption with customer-managed
+ keys is not available for free search services, and is only available for paid services created
+ on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ """
super(SearchIndexerSkillset, self).__init__(**kwargs)
self.name = kwargs['name']
self.description = kwargs.get('description', None)
@@ -5283,6 +6908,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerStatus, self).__init__(**kwargs)
self.status = None
self.last_result = None
@@ -5333,6 +6960,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerWarning, self).__init__(**kwargs)
self.key = None
self.message = None
@@ -5346,25 +6975,25 @@ class SearchResourceEncryptionKey(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your
+ :ivar key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data
+ at rest.
+ :vartype key_name: str
+ :ivar key_version: Required. The version of your Azure Key Vault key to be used to encrypt your
data at rest.
- :paramtype key_name: str
- :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt
- your data at rest.
- :paramtype key_version: str
- :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name,
- that contains the key to be used to encrypt your data at rest. An example URI might be
+ :vartype key_version: str
+ :ivar vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that
+ contains the key to be used to encrypt your data at rest. An example URI might be
https://my-keyvault-name.vault.azure.net.
- :paramtype vault_uri: str
- :keyword access_credentials: Optional Azure Active Directory credentials used for accessing
- your Azure Key Vault. Not required if using managed identity instead.
- :paramtype access_credentials:
+ :vartype vault_uri: str
+ :ivar access_credentials: Optional Azure Active Directory credentials used for accessing your
+ Azure Key Vault. Not required if using managed identity instead.
+ :vartype access_credentials:
~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials
- :keyword identity: An explicit managed identity to use for this encryption key. If not
- specified and the access credentials property is null, the system-assigned managed identity is
- used. On update to the resource, if the explicit identity is unspecified, it remains unchanged.
- If "none" is specified, the value of this property is cleared.
- :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ :ivar identity: An explicit managed identity to use for this encryption key. If not specified
+ and the access credentials property is null, the system-assigned managed identity is used. On
+ update to the resource, if the explicit identity is unspecified, it remains unchanged. If
+ "none" is specified, the value of this property is cleared.
+ :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
"""
_validation = {
@@ -5385,6 +7014,27 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your
+ data at rest.
+ :paramtype key_name: str
+ :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt
+ your data at rest.
+ :paramtype key_version: str
+ :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name,
+ that contains the key to be used to encrypt your data at rest. An example URI might be
+ https://my-keyvault-name.vault.azure.net.
+ :paramtype vault_uri: str
+ :keyword access_credentials: Optional Azure Active Directory credentials used for accessing
+ your Azure Key Vault. Not required if using managed identity instead.
+ :paramtype access_credentials:
+ ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials
+ :keyword identity: An explicit managed identity to use for this encryption key. If not
+ specified and the access credentials property is null, the system-assigned managed identity is
+ used. On update to the resource, if the explicit identity is unspecified, it remains unchanged.
+ If "none" is specified, the value of this property is cleared.
+ :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ """
super(SearchResourceEncryptionKey, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.key_version = kwargs['key_version']
@@ -5398,30 +7048,29 @@ class SentimentSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT",
"ru", "es", "sv", "tr".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.SentimentSkillLanguage
"""
@@ -5445,6 +7094,30 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT",
+ "ru", "es", "sv", "tr".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.SentimentSkillLanguage
+ """
super(SentimentSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -5455,36 +7128,35 @@ class SentimentSkillV3(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword include_opinion_mining: If set to true, the skill output will include information from
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+ :ivar include_opinion_mining: If set to true, the skill output will include information from
Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated
assessment (adjective) in the text. Default is false.
- :paramtype include_opinion_mining: bool
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype include_opinion_mining: bool
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -5509,6 +7181,35 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+ :keyword include_opinion_mining: If set to true, the skill output will include information from
+ Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated
+ assessment (adjective) in the text. Default is false.
+ :paramtype include_opinion_mining: bool
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(SentimentSkillV3, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.SentimentSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -5521,21 +7222,20 @@ class ServiceCounters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword document_counter: Required. Total number of documents across all indexes in the
- service.
- :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword index_counter: Required. Total number of indexes.
- :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword indexer_counter: Required. Total number of indexers.
- :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword data_source_counter: Required. Total number of data sources.
- :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword storage_size_counter: Required. Total size of used storage in bytes.
- :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword synonym_map_counter: Required. Total number of synonym maps.
- :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword skillset_counter: Total number of skillsets.
- :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar document_counter: Required. Total number of documents across all indexes in the service.
+ :vartype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar index_counter: Required. Total number of indexes.
+ :vartype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar indexer_counter: Required. Total number of indexers.
+ :vartype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar data_source_counter: Required. Total number of data sources.
+ :vartype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar storage_size_counter: Required. Total size of used storage in bytes.
+ :vartype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar synonym_map_counter: Required. Total number of synonym maps.
+ :vartype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar skillset_counter: Total number of skillsets.
+ :vartype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
"""
_validation = {
@@ -5561,6 +7261,23 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword document_counter: Required. Total number of documents across all indexes in the
+ service.
+ :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword index_counter: Required. Total number of indexes.
+ :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword indexer_counter: Required. Total number of indexers.
+ :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword data_source_counter: Required. Total number of data sources.
+ :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword storage_size_counter: Required. Total size of used storage in bytes.
+ :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword synonym_map_counter: Required. Total number of synonym maps.
+ :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword skillset_counter: Total number of skillsets.
+ :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ """
super(ServiceCounters, self).__init__(**kwargs)
self.document_counter = kwargs['document_counter']
self.index_counter = kwargs['index_counter']
@@ -5574,17 +7291,17 @@ def __init__(
class ServiceLimits(msrest.serialization.Model):
"""Represents various service level limits.
- :keyword max_fields_per_index: The maximum allowed fields per index.
- :paramtype max_fields_per_index: int
- :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in
- an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
- :paramtype max_field_nesting_depth_per_index: int
- :keyword max_complex_collection_fields_per_index: The maximum number of fields of type
+ :ivar max_fields_per_index: The maximum allowed fields per index.
+ :vartype max_fields_per_index: int
+ :ivar max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an
+ index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
+ :vartype max_field_nesting_depth_per_index: int
+ :ivar max_complex_collection_fields_per_index: The maximum number of fields of type
Collection(Edm.ComplexType) allowed in an index.
- :paramtype max_complex_collection_fields_per_index: int
- :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in
- complex collections allowed per document.
- :paramtype max_complex_objects_in_collections_per_document: int
+ :vartype max_complex_collection_fields_per_index: int
+ :ivar max_complex_objects_in_collections_per_document: The maximum number of objects in complex
+ collections allowed per document.
+ :vartype max_complex_objects_in_collections_per_document: int
"""
_attribute_map = {
@@ -5598,6 +7315,19 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword max_fields_per_index: The maximum allowed fields per index.
+ :paramtype max_fields_per_index: int
+ :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in
+ an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
+ :paramtype max_field_nesting_depth_per_index: int
+ :keyword max_complex_collection_fields_per_index: The maximum number of fields of type
+ Collection(Edm.ComplexType) allowed in an index.
+ :paramtype max_complex_collection_fields_per_index: int
+ :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in
+ complex collections allowed per document.
+ :paramtype max_complex_objects_in_collections_per_document: int
+ """
super(ServiceLimits, self).__init__(**kwargs)
self.max_fields_per_index = kwargs.get('max_fields_per_index', None)
self.max_field_nesting_depth_per_index = kwargs.get('max_field_nesting_depth_per_index', None)
@@ -5610,10 +7340,10 @@ class ServiceStatistics(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword counters: Required. Service level resource counters.
- :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters
- :keyword limits: Required. Service level general limits.
- :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits
+ :ivar counters: Required. Service level resource counters.
+ :vartype counters: ~azure.search.documents.indexes.models.ServiceCounters
+ :ivar limits: Required. Service level general limits.
+ :vartype limits: ~azure.search.documents.indexes.models.ServiceLimits
"""
_validation = {
@@ -5630,6 +7360,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword counters: Required. Service level resource counters.
+ :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters
+ :keyword limits: Required. Service level general limits.
+ :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits
+ """
super(ServiceStatistics, self).__init__(**kwargs)
self.counters = kwargs['counters']
self.limits = kwargs['limits']
@@ -5640,26 +7376,25 @@ class ShaperSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
@@ -5681,6 +7416,25 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ """
super(ShaperSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' # type: str
@@ -5690,31 +7444,31 @@ class ShingleTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2.
- :paramtype max_shingle_size: int
- :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be
- less than the value of maxShingleSize.
- :paramtype min_shingle_size: int
- :keyword output_unigrams: A value indicating whether the output stream will contain the input
+ :vartype name: str
+ :ivar max_shingle_size: The maximum shingle size. Default and minimum value is 2.
+ :vartype max_shingle_size: int
+ :ivar min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less
+ than the value of maxShingleSize.
+ :vartype min_shingle_size: int
+ :ivar output_unigrams: A value indicating whether the output stream will contain the input
tokens (unigrams) as well as shingles. Default is true.
- :paramtype output_unigrams: bool
- :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for
- those times when no shingles are available. This property takes precedence when outputUnigrams
- is set to false. Default is false.
- :paramtype output_unigrams_if_no_shingles: bool
- :keyword token_separator: The string to use when joining adjacent tokens to form a shingle.
+ :vartype output_unigrams: bool
+ :ivar output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those
+ times when no shingles are available. This property takes precedence when outputUnigrams is set
+ to false. Default is false.
+ :vartype output_unigrams_if_no_shingles: bool
+ :ivar token_separator: The string to use when joining adjacent tokens to form a shingle.
Default is a single space (" ").
- :paramtype token_separator: str
- :keyword filter_token: The string to insert for each position at which there is no token.
- Default is an underscore ("_").
- :paramtype filter_token: str
+ :vartype token_separator: str
+ :ivar filter_token: The string to insert for each position at which there is no token. Default
+ is an underscore ("_").
+ :vartype filter_token: str
"""
_validation = {
@@ -5739,6 +7493,30 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2.
+ :paramtype max_shingle_size: int
+ :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be
+ less than the value of maxShingleSize.
+ :paramtype min_shingle_size: int
+ :keyword output_unigrams: A value indicating whether the output stream will contain the input
+ tokens (unigrams) as well as shingles. Default is true.
+ :paramtype output_unigrams: bool
+ :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for
+ those times when no shingles are available. This property takes precedence when outputUnigrams
+ is set to false. Default is false.
+ :paramtype output_unigrams_if_no_shingles: bool
+ :keyword token_separator: The string to use when joining adjacent tokens to form a shingle.
+ Default is a single space (" ").
+ :paramtype token_separator: str
+ :keyword filter_token: The string to insert for each position at which there is no token.
+ Default is an underscore ("_").
+ :paramtype filter_token: str
+ """
super(ShingleTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' # type: str
self.max_shingle_size = kwargs.get('max_shingle_size', 2)
@@ -5749,23 +7527,46 @@ def __init__(
self.filter_token = kwargs.get('filter_token', "_")
+class SkillNames(msrest.serialization.Model):
+ """SkillNames.
+
+    :ivar skill_names: The names of skills to be reset.
+ :vartype skill_names: list[str]
+ """
+
+ _attribute_map = {
+ 'skill_names': {'key': 'skillNames', 'type': '[str]'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ """
+        :keyword skill_names: The names of skills to be reset.
+ :paramtype skill_names: list[str]
+ """
+ super(SkillNames, self).__init__(**kwargs)
+ self.skill_names = kwargs.get('skill_names', None)
+
+
class SnowballTokenFilter(TokenFilter):
"""A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword language: Required. The language to use. Possible values include: "armenian",
- "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2",
- "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian",
- "russian", "spanish", "swedish", "turkish".
- :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
+ :vartype name: str
+ :ivar language: Required. The language to use. Possible values include: "armenian", "basque",
+ "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian",
+ "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian",
+ "spanish", "swedish", "turkish".
+ :vartype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
"""
_validation = {
@@ -5784,6 +7585,17 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword language: Required. The language to use. Possible values include: "armenian",
+ "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2",
+ "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian",
+ "russian", "spanish", "swedish", "turkish".
+ :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
+ """
super(SnowballTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' # type: str
self.language = kwargs['language']
@@ -5794,13 +7606,13 @@ class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data deletion detection
+ :ivar odata_type: Required. Identifies the concrete type of the data deletion detection
policy.Constant filled by server.
- :paramtype odata_type: str
- :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection.
- :paramtype soft_delete_column_name: str
- :keyword soft_delete_marker_value: The marker value that identifies an item as deleted.
- :paramtype soft_delete_marker_value: str
+ :vartype odata_type: str
+ :ivar soft_delete_column_name: The name of the column to use for soft-deletion detection.
+ :vartype soft_delete_column_name: str
+ :ivar soft_delete_marker_value: The marker value that identifies an item as deleted.
+ :vartype soft_delete_marker_value: str
"""
_validation = {
@@ -5817,6 +7629,12 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection.
+ :paramtype soft_delete_column_name: str
+ :keyword soft_delete_marker_value: The marker value that identifies an item as deleted.
+ :paramtype soft_delete_marker_value: str
+ """
super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' # type: str
self.soft_delete_column_name = kwargs.get('soft_delete_column_name', None)
@@ -5828,35 +7646,34 @@ class SplitSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.SplitSkillLanguage
- :keyword text_split_mode: A value indicating which split mode to perform. Possible values
- include: "pages", "sentences".
- :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
- :keyword maximum_page_length: The desired maximum page length. Default is 10000.
- :paramtype maximum_page_length: int
+ :ivar text_split_mode: A value indicating which split mode to perform. Possible values include:
+ "pages", "sentences".
+ :vartype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
+ :ivar maximum_page_length: The desired maximum page length. Default is 10000.
+ :vartype maximum_page_length: int
"""
_validation = {
@@ -5881,6 +7698,34 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.SplitSkillLanguage
+ :keyword text_split_mode: A value indicating which split mode to perform. Possible values
+ include: "pages", "sentences".
+ :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
+ :keyword maximum_page_length: The desired maximum page length. Default is 10000.
+ :paramtype maximum_page_length: int
+ """
super(SplitSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.SplitSkill' # type: str
self.default_language_code = kwargs.get('default_language_code', None)
@@ -5893,9 +7738,9 @@ class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data change detection
+ :ivar odata_type: Required. Identifies the concrete type of the data change detection
policy.Constant filled by server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -5910,6 +7755,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' # type: str
@@ -5919,16 +7766,16 @@ class StemmerOverrideTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for
+ :vartype name: str
+ :ivar rules: Required. A list of stemming rules in the following format: "word => stem", for
example: "ran => run".
- :paramtype rules: list[str]
+ :vartype rules: list[str]
"""
_validation = {
@@ -5947,6 +7794,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for
+ example: "ran => run".
+ :paramtype rules: list[str]
+ """
super(StemmerOverrideTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' # type: str
self.rules = kwargs['rules']
@@ -5957,23 +7813,23 @@ class StemmerTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword language: Required. The language to use. Possible values include: "arabic",
- "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch",
- "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2",
- "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
+ :vartype name: str
+ :ivar language: Required. The language to use. Possible values include: "arabic", "armenian",
+ "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp",
+ "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins",
+ "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
"minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi",
"hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani",
"latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk",
"portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian",
"lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish".
- :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
+ :vartype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
"""
_validation = {
@@ -5992,6 +7848,22 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword language: Required. The language to use. Possible values include: "arabic",
+ "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch",
+ "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2",
+ "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
+ "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi",
+ "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani",
+ "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk",
+ "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian",
+ "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish".
+ :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
+ """
super(StemmerTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' # type: str
self.language = kwargs['language']
@@ -6002,15 +7874,15 @@ class StopAnalyzer(LexicalAnalyzer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword stopwords: A list of stopwords.
- :paramtype stopwords: list[str]
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar stopwords: A list of stopwords.
+ :vartype stopwords: list[str]
"""
_validation = {
@@ -6028,6 +7900,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword stopwords: A list of stopwords.
+ :paramtype stopwords: list[str]
+ """
super(StopAnalyzer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' # type: str
self.stopwords = kwargs.get('stopwords', None)
@@ -6038,29 +7918,29 @@ class StopwordsTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot
+ :vartype name: str
+ :ivar stopwords: The list of stopwords. This property and the stopwords list property cannot
both be set.
- :paramtype stopwords: list[str]
- :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords
+ :vartype stopwords: list[str]
+ :ivar stopwords_list: A predefined list of stopwords to use. This property and the stopwords
property cannot both be set. Default is English. Possible values include: "arabic", "armenian",
"basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english",
"finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian",
"irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian",
"sorani", "spanish", "swedish", "thai", "turkish".
- :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
- :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
- converted to lower case first. Default is false.
- :paramtype ignore_case: bool
- :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term
- if it's a stop word. Default is true.
- :paramtype remove_trailing_stop_words: bool
+ :vartype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
+ :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted
+ to lower case first. Default is false.
+ :vartype ignore_case: bool
+ :ivar remove_trailing_stop_words: A value indicating whether to ignore the last search term if
+ it's a stop word. Default is true.
+ :vartype remove_trailing_stop_words: bool
"""
_validation = {
@@ -6081,6 +7961,28 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot
+ both be set.
+ :paramtype stopwords: list[str]
+ :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords
+ property cannot both be set. Default is English. Possible values include: "arabic", "armenian",
+ "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english",
+ "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian",
+ "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian",
+ "sorani", "spanish", "swedish", "thai", "turkish".
+ :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
+ :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
+ converted to lower case first. Default is false.
+ :paramtype ignore_case: bool
+ :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term
+ if it's a stop word. Default is true.
+ :paramtype remove_trailing_stop_words: bool
+ """
super(StopwordsTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' # type: str
self.stopwords = kwargs.get('stopwords', None)
@@ -6096,14 +7998,14 @@ class Suggester(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the suggester.
- :paramtype name: str
+ :ivar name: Required. The name of the suggester.
+ :vartype name: str
:ivar search_mode: A value indicating the capabilities of the suggester. Has constant value:
"analyzingInfixMatching".
:vartype search_mode: str
- :keyword source_fields: Required. The list of field names to which the suggester applies. Each
+ :ivar source_fields: Required. The list of field names to which the suggester applies. Each
field must be searchable.
- :paramtype source_fields: list[str]
+ :vartype source_fields: list[str]
"""
_validation = {
@@ -6124,6 +8026,13 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the suggester.
+ :paramtype name: str
+ :keyword source_fields: Required. The list of field names to which the suggester applies. Each
+ field must be searchable.
+ :paramtype source_fields: list[str]
+ """
super(Suggester, self).__init__(**kwargs)
self.name = kwargs['name']
self.source_fields = kwargs['source_fields']
@@ -6136,15 +8045,15 @@ class SynonymMap(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the synonym map.
- :paramtype name: str
+ :ivar name: Required. The name of the synonym map.
+ :vartype name: str
:ivar format: The format of the synonym map. Only the 'solr' format is currently supported. Has
constant value: "solr".
:vartype format: str
- :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The
+ :ivar synonyms: Required. A series of synonym rules in the specified synonym map format. The
rules must be separated by newlines.
- :paramtype synonyms: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :vartype synonyms: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
@@ -6152,9 +8061,9 @@ class SynonymMap(msrest.serialization.Model):
needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
- :keyword e_tag: The ETag of the synonym map.
- :paramtype e_tag: str
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :ivar e_tag: The ETag of the synonym map.
+ :vartype e_tag: str
"""
_validation = {
@@ -6177,6 +8086,24 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the synonym map.
+ :paramtype name: str
+ :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The
+ rules must be separated by newlines.
+ :paramtype synonyms: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your data when you
+ want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
+ Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
+ Search will ignore attempts to set this property to null. You can change this property as
+ needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
+ customer-managed keys is not available for free search services, and is only available for paid
+ services created on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :keyword e_tag: The ETag of the synonym map.
+ :paramtype e_tag: str
+ """
super(SynonymMap, self).__init__(**kwargs)
self.name = kwargs['name']
self.synonyms = kwargs['synonyms']
@@ -6189,30 +8116,30 @@ class SynonymTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible,
+ :vartype name: str
+ :ivar synonyms: Required. A list of synonyms in following one of two formats: 1. incredible,
unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced
with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma
separated list of equivalent words. Set the expand option to change how this list is
interpreted.
- :paramtype synonyms: list[str]
- :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is
+ :vartype synonyms: list[str]
+ :ivar ignore_case: A value indicating whether to case-fold input for matching. Default is
false.
- :paramtype ignore_case: bool
- :keyword expand: A value indicating whether all words in the list of synonyms (if => notation
- is not used) will map to one another. If true, all words in the list of synonyms (if =>
- notation is not used) will map to one another. The following list: incredible, unbelievable,
- fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
+ :vartype ignore_case: bool
+ :ivar expand: A value indicating whether all words in the list of synonyms (if => notation is
+ not used) will map to one another. If true, all words in the list of synonyms (if => notation
+ is not used) will map to one another. The following list: incredible, unbelievable, fabulous,
+ amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable,
fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing =>
incredible. Default is true.
- :paramtype expand: bool
+ :vartype expand: bool
"""
_validation = {
@@ -6233,6 +8160,29 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible,
+ unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced
+ with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma
+ separated list of equivalent words. Set the expand option to change how this list is
+ interpreted.
+ :paramtype synonyms: list[str]
+ :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is
+ false.
+ :paramtype ignore_case: bool
+ :keyword expand: A value indicating whether all words in the list of synonyms (if => notation
+ is not used) will map to one another. If true, all words in the list of synonyms (if =>
+ notation is not used) will map to one another. The following list: incredible, unbelievable,
+ fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
+ unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable,
+ fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing =>
+ incredible. Default is true.
+ :paramtype expand: bool
+ """
super(SynonymTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' # type: str
self.synonyms = kwargs['synonyms']
@@ -6245,21 +8195,21 @@ class TagScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the tag scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
+ :ivar parameters: Required. Parameter values for the tag scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
"""
_validation = {
@@ -6281,6 +8231,20 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the tag scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
+ """
super(TagScoringFunction, self).__init__(**kwargs)
self.type = 'tag' # type: str
self.parameters = kwargs['parameters']
@@ -6291,9 +8255,9 @@ class TagScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword tags_parameter: Required. The name of the parameter passed in search queries to
- specify the list of tags to compare against the target field.
- :paramtype tags_parameter: str
+ :ivar tags_parameter: Required. The name of the parameter passed in search queries to specify
+ the list of tags to compare against the target field.
+ :vartype tags_parameter: str
"""
_validation = {
@@ -6308,6 +8272,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword tags_parameter: Required. The name of the parameter passed in search queries to
+ specify the list of tags to compare against the target field.
+ :paramtype tags_parameter: str
+ """
super(TagScoringParameters, self).__init__(**kwargs)
self.tags_parameter = kwargs['tags_parameter']
@@ -6317,45 +8286,44 @@ class TextTranslationSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_to_language_code: Required. The language code to translate documents into for
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_to_language_code: Required. The language code to translate documents into for
documents that don't specify the to language explicitly. Possible values include: "af", "ar",
"bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
"fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
"tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
"pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
"ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
- :paramtype default_to_language_code: str or
+ :vartype default_to_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
- :keyword default_from_language_code: The language code to translate documents from for
- documents that don't specify the from language explicitly. Possible values include: "af", "ar",
- "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
- "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
- "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
- "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
- "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
- :paramtype default_from_language_code: str or
+ :ivar default_from_language_code: The language code to translate documents from for documents
+ that don't specify the from language explicitly. Possible values include: "af", "ar", "bn",
+ "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil",
+ "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh",
+ "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br",
+ "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta",
+ "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
+ :vartype default_from_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
- :keyword suggested_from: The language code to translate documents from when neither the
+ :ivar suggested_from: The language code to translate documents from when neither the
fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the
automatic language detection is unsuccessful. Default is en. Possible values include: "af",
"ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et",
@@ -6364,7 +8332,7 @@ class TextTranslationSkill(SearchIndexerSkill):
"pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv",
"ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml",
"pa".
- :paramtype suggested_from: str or
+ :vartype suggested_from: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
"""
@@ -6391,6 +8359,54 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_to_language_code: Required. The language code to translate documents into for
+ documents that don't specify the to language explicitly. Possible values include: "af", "ar",
+ "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
+ "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
+ "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
+ "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
+ "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
+ :paramtype default_to_language_code: str or
+ ~azure.search.documents.indexes.models.TextTranslationSkillLanguage
+ :keyword default_from_language_code: The language code to translate documents from for
+ documents that don't specify the from language explicitly. Possible values include: "af", "ar",
+ "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
+ "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
+ "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
+ "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
+ "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
+ :paramtype default_from_language_code: str or
+ ~azure.search.documents.indexes.models.TextTranslationSkillLanguage
+ :keyword suggested_from: The language code to translate documents from when neither the
+ fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the
+ automatic language detection is unsuccessful. Default is en. Possible values include: "af",
+ "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et",
+ "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja",
+ "sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl",
+ "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv",
+ "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml",
+ "pa".
+ :paramtype suggested_from: str or
+ ~azure.search.documents.indexes.models.TextTranslationSkillLanguage
+ """
super(TextTranslationSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' # type: str
self.default_to_language_code = kwargs['default_to_language_code']
@@ -6403,9 +8419,9 @@ class TextWeights(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword weights: Required. The dictionary of per-field weights to boost document scoring. The
+ :ivar weights: Required. The dictionary of per-field weights to boost document scoring. The
keys are field names and the values are the weights for each field.
- :paramtype weights: dict[str, float]
+ :vartype weights: dict[str, float]
"""
_validation = {
@@ -6420,6 +8436,11 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword weights: Required. The dictionary of per-field weights to boost document scoring. The
+ keys are field names and the values are the weights for each field.
+ :paramtype weights: dict[str, float]
+ """
super(TextWeights, self).__init__(**kwargs)
self.weights = kwargs['weights']
@@ -6429,15 +8450,15 @@ class TruncateTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword length: The length at which terms will be truncated. Default and maximum is 300.
- :paramtype length: int
+ :vartype name: str
+ :ivar length: The length at which terms will be truncated. Default and maximum is 300.
+ :vartype length: int
"""
_validation = {
@@ -6456,6 +8477,14 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword length: The length at which terms will be truncated. Default and maximum is 300.
+ :paramtype length: int
+ """
super(TruncateTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter' # type: str
self.length = kwargs.get('length', 300)
@@ -6466,16 +8495,16 @@ class UaxUrlEmailTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -6494,6 +8523,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(UaxUrlEmailTokenizer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' # type: str
self.max_token_length = kwargs.get('max_token_length', 255)
@@ -6504,16 +8542,16 @@ class UniqueTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword only_on_same_position: A value indicating whether to remove duplicates only at the
- same position. Default is false.
- :paramtype only_on_same_position: bool
+ :vartype name: str
+ :ivar only_on_same_position: A value indicating whether to remove duplicates only at the same
+ position. Default is false.
+ :vartype only_on_same_position: bool
"""
_validation = {
@@ -6531,6 +8569,15 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword only_on_same_position: A value indicating whether to remove duplicates only at the
+ same position. Default is false.
+ :paramtype only_on_same_position: bool
+ """
super(UniqueTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter' # type: str
self.only_on_same_position = kwargs.get('only_on_same_position', False)
@@ -6541,39 +8588,38 @@ class WebApiSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword uri: Required. The url for the Web API.
- :paramtype uri: str
- :keyword http_headers: The headers required to make the http request.
- :paramtype http_headers: dict[str, str]
- :keyword http_method: The method for the http request.
- :paramtype http_method: str
- :keyword timeout: The desired timeout for the request. Default is 30 seconds.
- :paramtype timeout: ~datetime.timedelta
- :keyword batch_size: The desired batch size which indicates number of documents.
- :paramtype batch_size: int
- :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the
- Web API.
- :paramtype degree_of_parallelism: int
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar uri: Required. The url for the Web API.
+ :vartype uri: str
+ :ivar http_headers: The headers required to make the http request.
+ :vartype http_headers: dict[str, str]
+ :ivar http_method: The method for the http request.
+ :vartype http_method: str
+ :ivar timeout: The desired timeout for the request. Default is 30 seconds.
+ :vartype timeout: ~datetime.timedelta
+ :ivar batch_size: The desired batch size which indicates number of documents.
+ :vartype batch_size: int
+ :ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web
+ API.
+ :vartype degree_of_parallelism: int
"""
_validation = {
@@ -6602,6 +8648,38 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword uri: Required. The url for the Web API.
+ :paramtype uri: str
+ :keyword http_headers: The headers required to make the http request.
+ :paramtype http_headers: dict[str, str]
+ :keyword http_method: The method for the http request.
+ :paramtype http_method: str
+ :keyword timeout: The desired timeout for the request. Default is 30 seconds.
+ :paramtype timeout: ~datetime.timedelta
+ :keyword batch_size: The desired batch size which indicates number of documents.
+ :paramtype batch_size: int
+ :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the
+ Web API.
+ :paramtype degree_of_parallelism: int
+ """
super(WebApiSkill, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' # type: str
self.uri = kwargs['uri']
@@ -6617,44 +8695,43 @@ class WordDelimiterTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes
+ :vartype name: str
+ :ivar generate_word_parts: A value indicating whether to generate part words. If set, causes
parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is
true.
- :paramtype generate_word_parts: bool
- :keyword generate_number_parts: A value indicating whether to generate number subwords. Default
- is true.
- :paramtype generate_number_parts: bool
- :keyword catenate_words: A value indicating whether maximum runs of word parts will be
- catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default
- is false.
- :paramtype catenate_words: bool
- :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be
+ :vartype generate_word_parts: bool
+ :ivar generate_number_parts: A value indicating whether to generate number subwords. Default is
+ true.
+ :vartype generate_number_parts: bool
+ :ivar catenate_words: A value indicating whether maximum runs of word parts will be catenated.
+ For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false.
+ :vartype catenate_words: bool
+ :ivar catenate_numbers: A value indicating whether maximum runs of number parts will be
catenated. For example, if this is set to true, "1-2" becomes "12". Default is false.
- :paramtype catenate_numbers: bool
- :keyword catenate_all: A value indicating whether all subword parts will be catenated. For
+ :vartype catenate_numbers: bool
+ :ivar catenate_all: A value indicating whether all subword parts will be catenated. For
example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false.
- :paramtype catenate_all: bool
- :keyword split_on_case_change: A value indicating whether to split words on caseChange. For
+ :vartype catenate_all: bool
+ :ivar split_on_case_change: A value indicating whether to split words on caseChange. For
example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true.
- :paramtype split_on_case_change: bool
- :keyword preserve_original: A value indicating whether original words will be preserved and
- added to the subword list. Default is false.
- :paramtype preserve_original: bool
- :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if
- this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
- :paramtype split_on_numerics: bool
- :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each
+ :vartype split_on_case_change: bool
+ :ivar preserve_original: A value indicating whether original words will be preserved and added
+ to the subword list. Default is false.
+ :vartype preserve_original: bool
+ :ivar split_on_numerics: A value indicating whether to split on numbers. For example, if this
+ is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
+ :vartype split_on_numerics: bool
+ :ivar stem_english_possessive: A value indicating whether to remove trailing "'s" for each
subword. Default is true.
- :paramtype stem_english_possessive: bool
- :keyword protected_words: A list of tokens to protect from being delimited.
- :paramtype protected_words: list[str]
+ :vartype stem_english_possessive: bool
+ :ivar protected_words: A list of tokens to protect from being delimited.
+ :vartype protected_words: list[str]
"""
_validation = {
@@ -6681,6 +8758,43 @@ def __init__(
self,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes
+ parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is
+ true.
+ :paramtype generate_word_parts: bool
+ :keyword generate_number_parts: A value indicating whether to generate number subwords. Default
+ is true.
+ :paramtype generate_number_parts: bool
+ :keyword catenate_words: A value indicating whether maximum runs of word parts will be
+ catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default
+ is false.
+ :paramtype catenate_words: bool
+ :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be
+ catenated. For example, if this is set to true, "1-2" becomes "12". Default is false.
+ :paramtype catenate_numbers: bool
+ :keyword catenate_all: A value indicating whether all subword parts will be catenated. For
+ example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false.
+ :paramtype catenate_all: bool
+ :keyword split_on_case_change: A value indicating whether to split words on caseChange. For
+ example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true.
+ :paramtype split_on_case_change: bool
+ :keyword preserve_original: A value indicating whether original words will be preserved and
+ added to the subword list. Default is false.
+ :paramtype preserve_original: bool
+ :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if
+ this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
+ :paramtype split_on_numerics: bool
+ :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each
+ subword. Default is true.
+ :paramtype stem_english_possessive: bool
+ :keyword protected_words: A list of tokens to protect from being delimited.
+ :paramtype protected_words: list[str]
+ """
super(WordDelimiterTokenFilter, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' # type: str
self.generate_word_parts = kwargs.get('generate_word_parts', True)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py
index 21b03c1ff677..4a8562656ef1 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py
@@ -53,6 +53,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(AnalyzedTokenInfo, self).__init__(**kwargs)
self.token = None
self.start_offset = None
@@ -65,9 +67,9 @@ class AnalyzeRequest(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword text: Required. The text to break into tokens.
- :paramtype text: str
- :keyword analyzer: The name of the analyzer to use to break the given text. Possible values
+ :ivar text: Required. The text to break into tokens.
+ :vartype text: str
+ :ivar analyzer: The name of the analyzer to use to break the given text. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
"zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
@@ -85,20 +87,19 @@ class AnalyzeRequest(msrest.serialization.Model):
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
- :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values
+ :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar tokenizer: The name of the tokenizer to use to break the given text. Possible values
include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase",
"microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram",
"path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
- :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
- :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible
+ :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :ivar normalizer: The name of the normalizer to use to normalize the given text. Possible
values include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
- :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
- :keyword token_filters: An optional list of token filters to use when breaking the given text.
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
- :keyword char_filters: An optional list of character filters to use when breaking the given
- text.
- :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :ivar token_filters: An optional list of token filters to use when breaking the given text.
+ :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :ivar char_filters: An optional list of character filters to use when breaking the given text.
+ :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
@@ -125,6 +126,42 @@ def __init__(
char_filters: Optional[List[Union[str, "CharFilterName"]]] = None,
**kwargs
):
+ """
+ :keyword text: Required. The text to break into tokens.
+ :paramtype text: str
+ :keyword analyzer: The name of the analyzer to use to break the given text. Possible values
+ include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
+ "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
+ "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
+ "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
+ "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
+ "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
+ "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
+ "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
+ "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
+ "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
+ "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
+ "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
+ "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
+ "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
+ "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
+ "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
+ "whitespace".
+ :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword tokenizer: The name of the tokenizer to use to break the given text. Possible values
+ include: "classic", "edgeNGram", "keyword_v2", "letter", "lowercase",
+ "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram",
+ "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace".
+ :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :keyword normalizer: The name of the normalizer to use to normalize the given text. Possible
+ values include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
+ :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :keyword token_filters: An optional list of token filters to use when breaking the given text.
+ :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :keyword char_filters: An optional list of character filters to use when breaking the given
+ text.
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ """
super(AnalyzeRequest, self).__init__(**kwargs)
self.text = text
self.analyzer = analyzer
@@ -139,9 +176,8 @@ class AnalyzeResult(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword tokens: Required. The list of tokens returned by the analyzer specified in the
- request.
- :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
+ :ivar tokens: Required. The list of tokens returned by the analyzer specified in the request.
+ :vartype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
"""
_validation = {
@@ -158,6 +194,11 @@ def __init__(
tokens: List["AnalyzedTokenInfo"],
**kwargs
):
+ """
+ :keyword tokens: Required. The list of tokens returned by the analyzer specified in the
+ request.
+ :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo]
+ """
super(AnalyzeResult, self).__init__(**kwargs)
self.tokens = tokens
@@ -170,13 +211,13 @@ class TokenFilter(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
+ :vartype name: str
"""
_validation = {
@@ -199,6 +240,12 @@ def __init__(
name: str,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(TokenFilter, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = name
@@ -209,16 +256,16 @@ class AsciiFoldingTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword preserve_original: A value indicating whether the original token will be kept. Default
- is false.
- :paramtype preserve_original: bool
+ :vartype name: str
+ :ivar preserve_original: A value indicating whether the original token will be kept. Default is
+ false.
+ :vartype preserve_original: bool
"""
_validation = {
@@ -239,6 +286,15 @@ def __init__(
preserve_original: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword preserve_original: A value indicating whether the original token will be kept. Default
+ is false.
+ :paramtype preserve_original: bool
+ """
super(AsciiFoldingTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' # type: str
self.preserve_original = preserve_original
@@ -249,12 +305,12 @@ class AzureActiveDirectoryApplicationCredentials(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword application_id: Required. An AAD Application ID that was granted the required access
+ :ivar application_id: Required. An AAD Application ID that was granted the required access
permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
Application ID should not be confused with the Object ID for your AAD Application.
- :paramtype application_id: str
- :keyword application_secret: The authentication key of the specified AAD application.
- :paramtype application_secret: str
+ :vartype application_id: str
+ :ivar application_secret: The authentication key of the specified AAD application.
+ :vartype application_secret: str
"""
_validation = {
@@ -273,6 +329,14 @@ def __init__(
application_secret: Optional[str] = None,
**kwargs
):
+ """
+ :keyword application_id: Required. An AAD Application ID that was granted the required access
+ permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The
+ Application ID should not be confused with the Object ID for your AAD Application.
+ :paramtype application_id: str
+ :keyword application_secret: The authentication key of the specified AAD application.
+ :paramtype application_secret: str
+ """
super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs)
self.application_id = application_id
self.application_secret = application_secret
@@ -286,8 +350,8 @@ class Similarity(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Constant filled by server.
- :paramtype odata_type: str
+ :ivar odata_type: Required. Constant filled by server.
+ :vartype odata_type: str
"""
_validation = {
@@ -306,6 +370,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(Similarity, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -315,16 +381,16 @@ class BM25Similarity(Similarity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Constant filled by server.
- :paramtype odata_type: str
- :keyword k1: This property controls the scaling function between the term frequency of each
+ :ivar odata_type: Required. Constant filled by server.
+ :vartype odata_type: str
+ :ivar k1: This property controls the scaling function between the term frequency of each
matching terms and the final relevance score of a document-query pair. By default, a value of
1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.
- :paramtype k1: float
- :keyword b: This property controls how the length of a document affects the relevance score. By
+ :vartype k1: float
+ :ivar b: This property controls how the length of a document affects the relevance score. By
default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied,
while a value of 1.0 means the score is fully normalized by the length of the document.
- :paramtype b: float
+ :vartype b: float
"""
_validation = {
@@ -344,6 +410,16 @@ def __init__(
b: Optional[float] = None,
**kwargs
):
+ """
+ :keyword k1: This property controls the scaling function between the term frequency of each
+ matching terms and the final relevance score of a document-query pair. By default, a value of
+ 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.
+ :paramtype k1: float
+ :keyword b: This property controls how the length of a document affects the relevance score. By
+ default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied,
+ while a value of 1.0 means the score is fully normalized by the length of the document.
+ :paramtype b: float
+ """
super(BM25Similarity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.BM25Similarity' # type: str
self.k1 = k1
@@ -358,13 +434,13 @@ class CharFilter(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
+ :vartype name: str
"""
_validation = {
@@ -387,6 +463,12 @@ def __init__(
name: str,
**kwargs
):
+ """
+ :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(CharFilter, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = name
@@ -397,19 +479,19 @@ class CjkBigramTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword ignore_scripts: The scripts to ignore.
- :paramtype ignore_scripts: list[str or
+ :vartype name: str
+ :ivar ignore_scripts: The scripts to ignore.
+ :vartype ignore_scripts: list[str or
~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts]
- :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if
+ :ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if
true), or just bigrams (if false). Default is false.
- :paramtype output_unigrams: bool
+ :vartype output_unigrams: bool
"""
_validation = {
@@ -432,6 +514,18 @@ def __init__(
output_unigrams: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword ignore_scripts: The scripts to ignore.
+ :paramtype ignore_scripts: list[str or
+ ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts]
+ :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if
+ true), or just bigrams (if false). Default is false.
+ :paramtype output_unigrams: bool
+ """
super(CjkBigramTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' # type: str
self.ignore_scripts = ignore_scripts
@@ -443,8 +537,8 @@ class ClassicSimilarity(Similarity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Constant filled by server.
- :paramtype odata_type: str
+ :ivar odata_type: Required. Constant filled by server.
+ :vartype odata_type: str
"""
_validation = {
@@ -459,6 +553,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ClassicSimilarity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' # type: str
@@ -471,13 +567,13 @@ class LexicalTokenizer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
"""
_validation = {
@@ -500,6 +596,12 @@ def __init__(
name: str,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(LexicalTokenizer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = name
@@ -510,16 +612,16 @@ class ClassicTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -541,6 +643,15 @@ def __init__(
max_token_length: Optional[int] = 255,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(ClassicTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' # type: str
self.max_token_length = max_token_length
@@ -554,11 +665,11 @@ class CognitiveServicesAccount(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource
+ :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
- :paramtype odata_type: str
- :keyword description: Description of the cognitive service resource attached to a skillset.
- :paramtype description: str
+ :vartype odata_type: str
+ :ivar description: Description of the cognitive service resource attached to a skillset.
+ :vartype description: str
"""
_validation = {
@@ -580,6 +691,10 @@ def __init__(
description: Optional[str] = None,
**kwargs
):
+ """
+ :keyword description: Description of the cognitive service resource attached to a skillset.
+ :paramtype description: str
+ """
super(CognitiveServicesAccount, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.description = description
@@ -590,14 +705,14 @@ class CognitiveServicesAccountKey(CognitiveServicesAccount):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource
+ :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
- :paramtype odata_type: str
- :keyword description: Description of the cognitive service resource attached to a skillset.
- :paramtype description: str
- :keyword key: Required. The key used to provision the cognitive service resource attached to a
+ :vartype odata_type: str
+ :ivar description: Description of the cognitive service resource attached to a skillset.
+ :vartype description: str
+ :ivar key: Required. The key used to provision the cognitive service resource attached to a
skillset.
- :paramtype key: str
+ :vartype key: str
"""
_validation = {
@@ -618,6 +733,13 @@ def __init__(
description: Optional[str] = None,
**kwargs
):
+ """
+ :keyword description: Description of the cognitive service resource attached to a skillset.
+ :paramtype description: str
+ :keyword key: Required. The key used to provision the cognitive service resource attached to a
+ skillset.
+ :paramtype key: str
+ """
super(CognitiveServicesAccountKey, self).__init__(description=description, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' # type: str
self.key = key
@@ -628,22 +750,22 @@ class CommonGramTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword common_words: Required. The set of common words.
- :paramtype common_words: list[str]
- :keyword ignore_case: A value indicating whether common words matching will be case
- insensitive. Default is false.
- :paramtype ignore_case: bool
- :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When
- in query mode, the token filter generates bigrams and then removes common words and single
- terms followed by a common word. Default is false.
- :paramtype use_query_mode: bool
+ :vartype name: str
+ :ivar common_words: Required. The set of common words.
+ :vartype common_words: list[str]
+ :ivar ignore_case: A value indicating whether common words matching will be case insensitive.
+ Default is false.
+ :vartype ignore_case: bool
+ :ivar use_query_mode: A value that indicates whether the token filter is in query mode. When in
+ query mode, the token filter generates bigrams and then removes common words and single terms
+ followed by a common word. Default is false.
+ :vartype use_query_mode: bool
"""
_validation = {
@@ -669,6 +791,21 @@ def __init__(
use_query_mode: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword common_words: Required. The set of common words.
+ :paramtype common_words: list[str]
+ :keyword ignore_case: A value indicating whether common words matching will be case
+ insensitive. Default is false.
+ :paramtype ignore_case: bool
+ :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When
+ in query mode, the token filter generates bigrams and then removes common words and single
+ terms followed by a common word. Default is false.
+ :paramtype use_query_mode: bool
+ """
super(CommonGramTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' # type: str
self.common_words = common_words
@@ -684,26 +821,25 @@ class SearchIndexerSkill(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
@@ -735,6 +871,25 @@ def __init__(
context: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ """
super(SearchIndexerSkill, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = name
@@ -749,26 +904,25 @@ class ConditionalSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
@@ -796,6 +950,25 @@ def __init__(
context: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ """
super(ConditionalSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' # type: str
@@ -805,14 +978,14 @@ class CorsOptions(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword allowed_origins: Required. The list of origins from which JavaScript code will be
- granted access to your index. Can contain a list of hosts of the form
+ :ivar allowed_origins: Required. The list of origins from which JavaScript code will be granted
+ access to your index. Can contain a list of hosts of the form
{protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not
recommended).
- :paramtype allowed_origins: list[str]
- :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight
+ :vartype allowed_origins: list[str]
+ :ivar max_age_in_seconds: The duration for which browsers should cache CORS preflight
responses. Defaults to 5 minutes.
- :paramtype max_age_in_seconds: long
+ :vartype max_age_in_seconds: long
"""
_validation = {
@@ -831,6 +1004,16 @@ def __init__(
max_age_in_seconds: Optional[int] = None,
**kwargs
):
+ """
+ :keyword allowed_origins: Required. The list of origins from which JavaScript code will be
+ granted access to your index. Can contain a list of hosts of the form
+ {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not
+ recommended).
+ :paramtype allowed_origins: list[str]
+ :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight
+ responses. Defaults to 5 minutes.
+ :paramtype max_age_in_seconds: long
+ """
super(CorsOptions, self).__init__(**kwargs)
self.allowed_origins = allowed_origins
self.max_age_in_seconds = max_age_in_seconds
@@ -844,13 +1027,13 @@ class LexicalAnalyzer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
"""
_validation = {
@@ -873,6 +1056,12 @@ def __init__(
name: str,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ """
super(LexicalAnalyzer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.name = name
@@ -883,27 +1072,27 @@ class CustomAnalyzer(LexicalAnalyzer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
"edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
"microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
"standard_v2", "uax_url_email", "whitespace".
- :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
- :keyword token_filters: A list of token filters used to filter out or modify the tokens
- generated by a tokenizer. For example, you can specify a lowercase filter that converts all
- characters to lowercase. The filters are run in the order in which they are listed.
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
- :keyword char_filters: A list of character filters used to prepare input text before it is
+ :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :ivar token_filters: A list of token filters used to filter out or modify the tokens generated
+ by a tokenizer. For example, you can specify a lowercase filter that converts all characters to
+ lowercase. The filters are run in the order in which they are listed.
+ :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :ivar char_filters: A list of character filters used to prepare input text before it is
processed by the tokenizer. For instance, they can replace certain characters or symbols. The
filters are run in the order in which they are listed.
- :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
@@ -929,6 +1118,26 @@ def __init__(
char_filters: Optional[List[Union[str, "CharFilterName"]]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
+ sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
+ "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
+ "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
+ "standard_v2", "uax_url_email", "whitespace".
+ :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName
+ :keyword token_filters: A list of token filters used to filter out or modify the tokens
+ generated by a tokenizer. For example, you can specify a lowercase filter that converts all
+ characters to lowercase. The filters are run in the order in which they are listed.
+ :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :keyword char_filters: A list of character filters used to prepare input text before it is
+ processed by the tokenizer. For instance, they can replace certain characters or symbols. The
+ filters are run in the order in which they are listed.
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ """
super(CustomAnalyzer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' # type: str
self.tokenizer = tokenizer
@@ -941,51 +1150,51 @@ class CustomEntity(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be
+ :ivar name: Required. The top-level entity descriptor. Matches in the skill output will be
grouped by this name, and it should represent the "normalized" form of the text being found.
- :paramtype name: str
- :keyword description: This field can be used as a passthrough for custom metadata about the
+ :vartype name: str
+ :ivar description: This field can be used as a passthrough for custom metadata about the
matched text(s). The value of this field will appear with every match of its entity in the
skill output.
- :paramtype description: str
- :keyword type: This field can be used as a passthrough for custom metadata about the matched
+ :vartype description: str
+ :ivar type: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
- :paramtype type: str
- :keyword subtype: This field can be used as a passthrough for custom metadata about the matched
+ :vartype type: str
+ :ivar subtype: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
- :paramtype subtype: str
- :keyword id: This field can be used as a passthrough for custom metadata about the matched
+ :vartype subtype: str
+ :ivar id: This field can be used as a passthrough for custom metadata about the matched
text(s). The value of this field will appear with every match of its entity in the skill
output.
- :paramtype id: str
- :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
+ :vartype id: str
+ :ivar case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
entity name should be sensitive to character casing. Sample case insensitive matches of
"Microsoft" could be: microsoft, microSoft, MICROSOFT.
- :paramtype case_sensitive: bool
- :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with
- the entity name should be sensitive to accent.
- :paramtype accent_sensitive: bool
- :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number
- of divergent characters that would still constitute a match with the entity name. The smallest
+ :vartype case_sensitive: bool
+ :ivar accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
+ entity name should be sensitive to accent.
+ :vartype accent_sensitive: bool
+ :ivar fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number of
+ divergent characters that would still constitute a match with the entity name. The smallest
possible fuzziness for any given match is returned. For instance, if the edit distance is set
to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case
sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but
otherwise do.
- :paramtype fuzzy_edit_distance: int
- :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It
- be used to change the default value of all aliases caseSensitive values.
- :paramtype default_case_sensitive: bool
- :keyword default_accent_sensitive: Changes the default accent sensitivity value for this
- entity. It be used to change the default value of all aliases accentSensitive values.
- :paramtype default_accent_sensitive: bool
- :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
+ :vartype fuzzy_edit_distance: int
+ :ivar default_case_sensitive: Changes the default case sensitivity value for this entity. It
+ can be used to change the default value of all aliases caseSensitive values.
+ :vartype default_case_sensitive: bool
+ :ivar default_accent_sensitive: Changes the default accent sensitivity value for this entity.
+ It can be used to change the default value of all aliases accentSensitive values.
+ :vartype default_accent_sensitive: bool
+ :ivar default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
entity. It can be used to change the default value of all aliases fuzzyEditDistance values.
- :paramtype default_fuzzy_edit_distance: int
- :keyword aliases: An array of complex objects that can be used to specify alternative spellings
- or synonyms to the root entity name.
- :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
+ :vartype default_fuzzy_edit_distance: int
+ :ivar aliases: An array of complex objects that can be used to specify alternative spellings or
+ synonyms to the root entity name.
+ :vartype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
"""
_validation = {
@@ -1024,6 +1233,53 @@ def __init__(
aliases: Optional[List["CustomEntityAlias"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The top-level entity descriptor. Matches in the skill output will be
+ grouped by this name, and it should represent the "normalized" form of the text being found.
+ :paramtype name: str
+ :keyword description: This field can be used as a passthrough for custom metadata about the
+ matched text(s). The value of this field will appear with every match of its entity in the
+ skill output.
+ :paramtype description: str
+ :keyword type: This field can be used as a passthrough for custom metadata about the matched
+ text(s). The value of this field will appear with every match of its entity in the skill
+ output.
+ :paramtype type: str
+ :keyword subtype: This field can be used as a passthrough for custom metadata about the matched
+ text(s). The value of this field will appear with every match of its entity in the skill
+ output.
+ :paramtype subtype: str
+ :keyword id: This field can be used as a passthrough for custom metadata about the matched
+ text(s). The value of this field will appear with every match of its entity in the skill
+ output.
+ :paramtype id: str
+ :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the
+ entity name should be sensitive to character casing. Sample case insensitive matches of
+ "Microsoft" could be: microsoft, microSoft, MICROSOFT.
+ :paramtype case_sensitive: bool
+ :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with
+ the entity name should be sensitive to accent.
+ :paramtype accent_sensitive: bool
+ :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number
+ of divergent characters that would still constitute a match with the entity name. The smallest
+ possible fuzziness for any given match is returned. For instance, if the edit distance is set
+ to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case
+ sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but
+ otherwise do.
+ :paramtype fuzzy_edit_distance: int
+ :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It
+ can be used to change the default value of all aliases caseSensitive values.
+ :paramtype default_case_sensitive: bool
+ :keyword default_accent_sensitive: Changes the default accent sensitivity value for this
+ entity. It can be used to change the default value of all aliases accentSensitive values.
+ :paramtype default_accent_sensitive: bool
+ :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this
+ entity. It can be used to change the default value of all aliases fuzzyEditDistance values.
+ :paramtype default_fuzzy_edit_distance: int
+ :keyword aliases: An array of complex objects that can be used to specify alternative spellings
+ or synonyms to the root entity name.
+ :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias]
+ """
super(CustomEntity, self).__init__(**kwargs)
self.name = name
self.description = description
@@ -1044,14 +1300,14 @@ class CustomEntityAlias(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword text: Required. The text of the alias.
- :paramtype text: str
- :keyword case_sensitive: Determine if the alias is case sensitive.
- :paramtype case_sensitive: bool
- :keyword accent_sensitive: Determine if the alias is accent sensitive.
- :paramtype accent_sensitive: bool
- :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
- :paramtype fuzzy_edit_distance: int
+ :ivar text: Required. The text of the alias.
+ :vartype text: str
+ :ivar case_sensitive: Determine if the alias is case sensitive.
+ :vartype case_sensitive: bool
+ :ivar accent_sensitive: Determine if the alias is accent sensitive.
+ :vartype accent_sensitive: bool
+ :ivar fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
+ :vartype fuzzy_edit_distance: int
"""
_validation = {
@@ -1074,6 +1330,16 @@ def __init__(
fuzzy_edit_distance: Optional[int] = None,
**kwargs
):
+ """
+ :keyword text: Required. The text of the alias.
+ :paramtype text: str
+ :keyword case_sensitive: Determine if the alias is case sensitive.
+ :paramtype case_sensitive: bool
+ :keyword accent_sensitive: Determine if the alias is accent sensitive.
+ :paramtype accent_sensitive: bool
+ :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
+ :paramtype fuzzy_edit_distance: int
+ """
super(CustomEntityAlias, self).__init__(**kwargs)
self.text = text
self.case_sensitive = case_sensitive
@@ -1086,47 +1352,45 @@ class CustomEntityLookupSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage
- :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to
+ :ivar entities_definition_uri: Path to a JSON or CSV file containing all the target text to
match against. This entity definition is read at the beginning of an indexer run. Any updates
to this file during an indexer run will not take effect until subsequent runs. This config must
be accessible over HTTPS.
- :paramtype entities_definition_uri: str
- :keyword inline_entities_definition: The inline CustomEntity definition.
- :paramtype inline_entities_definition:
- list[~azure.search.documents.indexes.models.CustomEntity]
- :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is
+ :vartype entities_definition_uri: str
+ :ivar inline_entities_definition: The inline CustomEntity definition.
+ :vartype inline_entities_definition: list[~azure.search.documents.indexes.models.CustomEntity]
+ :ivar global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is not
+ set in CustomEntity, this value will be the default value.
+ :vartype global_default_case_sensitive: bool
+ :ivar global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive is
not set in CustomEntity, this value will be the default value.
- :paramtype global_default_case_sensitive: bool
- :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive
- is not set in CustomEntity, this value will be the default value.
- :paramtype global_default_accent_sensitive: bool
- :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
+ :vartype global_default_accent_sensitive: bool
+ :ivar global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
FuzzyEditDistance is not set in CustomEntity, this value will be the default value.
- :paramtype global_default_fuzzy_edit_distance: int
+ :vartype global_default_fuzzy_edit_distance: int
"""
_validation = {
@@ -1166,6 +1430,46 @@ def __init__(
global_default_fuzzy_edit_distance: Optional[int] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage
+ :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to
+ match against. This entity definition is read at the beginning of an indexer run. Any updates
+ to this file during an indexer run will not take effect until subsequent runs. This config must
+ be accessible over HTTPS.
+ :paramtype entities_definition_uri: str
+ :keyword inline_entities_definition: The inline CustomEntity definition.
+ :paramtype inline_entities_definition:
+ list[~azure.search.documents.indexes.models.CustomEntity]
+ :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is
+ not set in CustomEntity, this value will be the default value.
+ :paramtype global_default_case_sensitive: bool
+ :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive
+ is not set in CustomEntity, this value will be the default value.
+ :paramtype global_default_accent_sensitive: bool
+ :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If
+ FuzzyEditDistance is not set in CustomEntity, this value will be the default value.
+ :paramtype global_default_fuzzy_edit_distance: int
+ """
super(CustomEntityLookupSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.CustomEntityLookupSkill' # type: str
self.default_language_code = default_language_code
@@ -1181,13 +1485,13 @@ class LexicalNormalizer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the normalizer.
- :paramtype odata_type: str
- :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
- 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
- :paramtype name: str
+ :ivar odata_type: Required. Identifies the concrete type of the normalizer.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding',
+ 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :vartype name: str
"""
_validation = {
@@ -1207,6 +1511,15 @@ def __init__(
name: str,
**kwargs
):
+ """
+ :keyword odata_type: Required. Identifies the concrete type of the normalizer.
+ :paramtype odata_type: str
+ :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
+ 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :paramtype name: str
+ """
super(LexicalNormalizer, self).__init__(**kwargs)
self.odata_type = odata_type
self.name = name
@@ -1217,21 +1530,21 @@ class CustomNormalizer(LexicalNormalizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the normalizer.
- :paramtype odata_type: str
- :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
- 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
- :paramtype name: str
- :keyword token_filters: A list of token filters used to filter out or modify the input token.
- For example, you can specify a lowercase filter that converts all characters to lowercase. The
+ :ivar odata_type: Required. Identifies the concrete type of the normalizer.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the normalizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding',
+ 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :vartype name: str
+ :ivar token_filters: A list of token filters used to filter out or modify the input token. For
+ example, you can specify a lowercase filter that converts all characters to lowercase. The
filters are run in the order in which they are listed.
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
- :keyword char_filters: A list of character filters used to prepare input text before it is
+ :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :ivar char_filters: A list of character filters used to prepare input text before it is
processed. For instance, they can replace certain characters or symbols. The filters are run in
the order in which they are listed.
- :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
"""
_validation = {
@@ -1255,6 +1568,23 @@ def __init__(
char_filters: Optional[List[Union[str, "CharFilterName"]]] = None,
**kwargs
):
+ """
+ :keyword odata_type: Required. Identifies the concrete type of the normalizer.
+ :paramtype odata_type: str
+ :keyword name: Required. The name of the normalizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named
+ 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'.
+ :paramtype name: str
+ :keyword token_filters: A list of token filters used to filter out or modify the input token.
+ For example, you can specify a lowercase filter that converts all characters to lowercase. The
+ filters are run in the order in which they are listed.
+ :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName]
+ :keyword char_filters: A list of character filters used to prepare input text before it is
+ processed. For instance, they can replace certain characters or symbols. The filters are run in
+ the order in which they are listed.
+ :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName]
+ """
super(CustomNormalizer, self).__init__(odata_type=odata_type, name=name, **kwargs)
self.token_filters = token_filters
self.char_filters = char_filters
@@ -1268,9 +1598,9 @@ class DataChangeDetectionPolicy(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data change detection
+ :ivar odata_type: Required. Identifies the concrete type of the data change detection
policy.Constant filled by server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -1289,6 +1619,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(DataChangeDetectionPolicy, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -1301,9 +1633,9 @@ class DataDeletionDetectionPolicy(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data deletion detection
+ :ivar odata_type: Required. Identifies the concrete type of the data deletion detection
policy.Constant filled by server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -1322,6 +1654,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(DataDeletionDetectionPolicy, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -1329,9 +1663,9 @@ def __init__(
class DataSourceCredentials(msrest.serialization.Model):
"""Represents credentials that can be used to connect to a datasource.
- :keyword connection_string: The connection string for the datasource. Set to
- ':code:``' if you do not want the connection string updated.
- :paramtype connection_string: str
+ :ivar connection_string: The connection string for the datasource. Set to ':code:``'
+ if you do not want the connection string updated.
+ :vartype connection_string: str
"""
_attribute_map = {
@@ -1344,6 +1678,11 @@ def __init__(
connection_string: Optional[str] = None,
**kwargs
):
+ """
+ :keyword connection_string: The connection string for the datasource. Set to
+ ':code:``' if you do not want the connection string updated.
+ :paramtype connection_string: str
+ """
super(DataSourceCredentials, self).__init__(**kwargs)
self.connection_string = connection_string
@@ -1353,11 +1692,11 @@ class DefaultCognitiveServicesAccount(CognitiveServicesAccount):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the cognitive service resource
+ :ivar odata_type: Required. Identifies the concrete type of the cognitive service resource
attached to a skillset.Constant filled by server.
- :paramtype odata_type: str
- :keyword description: Description of the cognitive service resource attached to a skillset.
- :paramtype description: str
+ :vartype odata_type: str
+ :ivar description: Description of the cognitive service resource attached to a skillset.
+ :vartype description: str
"""
_validation = {
@@ -1375,6 +1714,10 @@ def __init__(
description: Optional[str] = None,
**kwargs
):
+ """
+ :keyword description: Description of the cognitive service resource attached to a skillset.
+ :paramtype description: str
+ """
super(DefaultCognitiveServicesAccount, self).__init__(description=description, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' # type: str
@@ -1384,27 +1727,27 @@ class DictionaryDecompounderTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword word_list: Required. The list of words to match against.
- :paramtype word_list: list[str]
- :keyword min_word_size: The minimum word size. Only words longer than this get processed.
- Default is 5. Maximum is 300.
- :paramtype min_word_size: int
- :keyword min_subword_size: The minimum subword size. Only subwords longer than this are
- outputted. Default is 2. Maximum is 300.
- :paramtype min_subword_size: int
- :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are
+ :vartype name: str
+ :ivar word_list: Required. The list of words to match against.
+ :vartype word_list: list[str]
+ :ivar min_word_size: The minimum word size. Only words longer than this get processed. Default
+ is 5. Maximum is 300.
+ :vartype min_word_size: int
+ :ivar min_subword_size: The minimum subword size. Only subwords longer than this are outputted.
+ Default is 2. Maximum is 300.
+ :vartype min_subword_size: int
+ :ivar max_subword_size: The maximum subword size. Only subwords shorter than this are
outputted. Default is 15. Maximum is 300.
- :paramtype max_subword_size: int
- :keyword only_longest_match: A value indicating whether to add only the longest matching
- subword to the output. Default is false.
- :paramtype only_longest_match: bool
+ :vartype max_subword_size: int
+ :ivar only_longest_match: A value indicating whether to add only the longest matching subword
+ to the output. Default is false.
+ :vartype only_longest_match: bool
"""
_validation = {
@@ -1437,6 +1780,26 @@ def __init__(
only_longest_match: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword word_list: Required. The list of words to match against.
+ :paramtype word_list: list[str]
+ :keyword min_word_size: The minimum word size. Only words longer than this get processed.
+ Default is 5. Maximum is 300.
+ :paramtype min_word_size: int
+ :keyword min_subword_size: The minimum subword size. Only subwords longer than this are
+ outputted. Default is 2. Maximum is 300.
+ :paramtype min_subword_size: int
+ :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are
+ outputted. Default is 15. Maximum is 300.
+ :paramtype max_subword_size: int
+ :keyword only_longest_match: A value indicating whether to add only the longest matching
+ subword to the output. Default is false.
+ :paramtype only_longest_match: bool
+ """
super(DictionaryDecompounderTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' # type: str
self.word_list = word_list
@@ -1454,18 +1817,18 @@ class ScoringFunction(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
"""
@@ -1494,6 +1857,18 @@ def __init__(
interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ """
super(ScoringFunction, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.field_name = field_name
@@ -1506,21 +1881,21 @@ class DistanceScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the distance scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
+ :ivar parameters: Required. Parameter values for the distance scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
"""
_validation = {
@@ -1547,6 +1922,20 @@ def __init__(
interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the distance scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters
+ """
super(DistanceScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs)
self.type = 'distance' # type: str
self.parameters = parameters
@@ -1557,12 +1946,12 @@ class DistanceScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword reference_point_parameter: Required. The name of the parameter passed in search
- queries to specify the reference location.
- :paramtype reference_point_parameter: str
- :keyword boosting_distance: Required. The distance in kilometers from the reference location
- where the boosting range ends.
- :paramtype boosting_distance: float
+ :ivar reference_point_parameter: Required. The name of the parameter passed in search queries
+ to specify the reference location.
+ :vartype reference_point_parameter: str
+ :ivar boosting_distance: Required. The distance in kilometers from the reference location where
+ the boosting range ends.
+ :vartype boosting_distance: float
"""
_validation = {
@@ -1582,6 +1971,14 @@ def __init__(
boosting_distance: float,
**kwargs
):
+ """
+ :keyword reference_point_parameter: Required. The name of the parameter passed in search
+ queries to specify the reference location.
+ :paramtype reference_point_parameter: str
+ :keyword boosting_distance: Required. The distance in kilometers from the reference location
+ where the boosting range ends.
+ :paramtype boosting_distance: float
+ """
super(DistanceScoringParameters, self).__init__(**kwargs)
self.reference_point_parameter = reference_point_parameter
self.boosting_distance = boosting_distance
@@ -1592,33 +1989,32 @@ class DocumentExtractionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
- :paramtype parsing_mode: str
- :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
+ :vartype parsing_mode: str
+ :ivar data_to_extract: The type of data to be extracted for the skill. Will be set to
'contentAndMetadata' if not defined.
- :paramtype data_to_extract: str
- :keyword configuration: A dictionary of configurations for the skill.
- :paramtype configuration: dict[str, any]
+ :vartype data_to_extract: str
+ :ivar configuration: A dictionary of configurations for the skill.
+ :vartype configuration: dict[str, any]
"""
_validation = {
@@ -1652,6 +2048,32 @@ def __init__(
configuration: Optional[Dict[str, Any]] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined.
+ :paramtype parsing_mode: str
+ :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to
+ 'contentAndMetadata' if not defined.
+ :paramtype data_to_extract: str
+ :keyword configuration: A dictionary of configurations for the skill.
+ :paramtype configuration: dict[str, any]
+ """
super(DocumentExtractionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Util.DocumentExtractionSkill' # type: str
self.parsing_mode = parsing_mode
@@ -1659,26 +2081,58 @@ def __init__(
self.configuration = configuration
+class DocumentKeysOrIds(msrest.serialization.Model):
+ """Document keys or datasource document identifiers to be reset.
+
+ :ivar document_keys: document keys to be reset.
+ :vartype document_keys: list[str]
+ :ivar datasource_document_ids: datasource document identifiers to be reset.
+ :vartype datasource_document_ids: list[str]
+ """
+
+ _attribute_map = {
+ 'document_keys': {'key': 'documentKeys', 'type': '[str]'},
+ 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ document_keys: Optional[List[str]] = None,
+ datasource_document_ids: Optional[List[str]] = None,
+ **kwargs
+ ):
+ """
+ :keyword document_keys: document keys to be reset.
+ :paramtype document_keys: list[str]
+ :keyword datasource_document_ids: datasource document identifiers to be reset.
+ :paramtype datasource_document_ids: list[str]
+ """
+ super(DocumentKeysOrIds, self).__init__(**kwargs)
+ self.document_keys = document_keys
+ self.datasource_document_ids = datasource_document_ids
+
+
class EdgeNGramTokenFilter(TokenFilter):
"""Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2.
- :paramtype max_gram: int
- :keyword side: Specifies which side of the input the n-gram should be generated from. Default
- is "front". Possible values include: "front", "back".
- :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2.
+ :vartype max_gram: int
+ :ivar side: Specifies which side of the input the n-gram should be generated from. Default is
+ "front". Possible values include: "front", "back".
+ :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
_validation = {
@@ -1703,6 +2157,20 @@ def __init__(
side: Optional[Union[str, "EdgeNGramTokenFilterSide"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2.
+ :paramtype max_gram: int
+ :keyword side: Specifies which side of the input the n-gram should be generated from. Default
+ is "front". Possible values include: "front", "back".
+ :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ """
super(EdgeNGramTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' # type: str
self.min_gram = min_gram
@@ -1715,21 +2183,21 @@ class EdgeNGramTokenFilterV2(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
- :keyword side: Specifies which side of the input the n-gram should be generated from. Default
- is "front". Possible values include: "front", "back".
- :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
+ :ivar side: Specifies which side of the input the n-gram should be generated from. Default is
+ "front". Possible values include: "front", "back".
+ :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
"""
_validation = {
@@ -1756,6 +2224,20 @@ def __init__(
side: Optional[Union[str, "EdgeNGramTokenFilterSide"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ :keyword side: Specifies which side of the input the n-gram should be generated from. Default
+ is "front". Possible values include: "front", "back".
+ :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide
+ """
super(EdgeNGramTokenFilterV2, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' # type: str
self.min_gram = min_gram
@@ -1768,20 +2250,20 @@ class EdgeNGramTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
- :keyword token_chars: Character classes to keep in the tokens.
- :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
+ :ivar token_chars: Character classes to keep in the tokens.
+ :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
_validation = {
@@ -1808,6 +2290,19 @@ def __init__(
token_chars: Optional[List[Union[str, "TokenCharacterKind"]]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ :keyword token_chars: Character classes to keep in the tokens.
+ :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ """
super(EdgeNGramTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' # type: str
self.min_gram = min_gram
@@ -1820,15 +2315,15 @@ class ElisionTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword articles: The set of articles to remove.
- :paramtype articles: list[str]
+ :vartype name: str
+ :ivar articles: The set of articles to remove.
+ :vartype articles: list[str]
"""
_validation = {
@@ -1849,6 +2344,14 @@ def __init__(
articles: Optional[List[str]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword articles: The set of articles to remove.
+ :paramtype articles: list[str]
+ """
super(ElisionTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' # type: str
self.articles = articles
@@ -1859,36 +2362,35 @@ class EntityLinkingSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype minimum_precision: float
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -1923,6 +2425,35 @@ def __init__(
model_version: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+ :keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(EntityLinkingSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.EntityLinkingSkill' # type: str
self.default_language_code = default_language_code
@@ -1935,42 +2466,41 @@ class EntityRecognitionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword categories: A list of entity categories that should be extracted.
- :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar categories: A list of entity categories that should be extracted.
+ :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de",
"el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage
- :keyword include_typeless_entities: Determines whether or not to include entities which are
- well known but don't conform to a pre-defined type. If this configuration is not set (default),
- set to null or set to false, entities which don't conform to one of the pre-defined types will
- not be surfaced.
- :paramtype include_typeless_entities: bool
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :ivar include_typeless_entities: Determines whether or not to include entities which are well
+ known but don't conform to a pre-defined type. If this configuration is not set (default), set
+ to null or set to false, entities which don't conform to one of the pre-defined types will not
+ be surfaced.
+ :vartype include_typeless_entities: bool
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
+ :vartype minimum_precision: float
"""
_validation = {
@@ -2006,6 +2536,41 @@ def __init__(
minimum_precision: Optional[float] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword categories: A list of entity categories that should be extracted.
+ :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de",
+ "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage
+ :keyword include_typeless_entities: Determines whether or not to include entities which are
+ well known but don't conform to a pre-defined type. If this configuration is not set (default),
+ set to null or set to false, entities which don't conform to one of the pre-defined types will
+ not be surfaced.
+ :paramtype include_typeless_entities: bool
+        :keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ """
super(EntityRecognitionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' # type: str
self.categories = categories
@@ -2019,38 +2584,37 @@ class EntityRecognitionSkillV3(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+    :ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword categories: A list of entity categories that should be extracted.
- :paramtype categories: list[str]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar categories: A list of entity categories that should be extracted.
+ :vartype categories: list[str]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+    :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype minimum_precision: float
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -2087,6 +2651,37 @@ def __init__(
model_version: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword categories: A list of entity categories that should be extracted.
+ :paramtype categories: list[str]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+        :keyword minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(EntityRecognitionSkillV3, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.EntityRecognitionSkill' # type: str
self.categories = categories
@@ -2100,13 +2695,13 @@ class FieldMapping(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword source_field_name: Required. The name of the field in the data source.
- :paramtype source_field_name: str
- :keyword target_field_name: The name of the target field in the index. Same as the source field
+ :ivar source_field_name: Required. The name of the field in the data source.
+ :vartype source_field_name: str
+ :ivar target_field_name: The name of the target field in the index. Same as the source field
name by default.
- :paramtype target_field_name: str
- :keyword mapping_function: A function to apply to each source field value before indexing.
- :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
+ :vartype target_field_name: str
+ :ivar mapping_function: A function to apply to each source field value before indexing.
+ :vartype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
"""
_validation = {
@@ -2127,6 +2722,15 @@ def __init__(
mapping_function: Optional["FieldMappingFunction"] = None,
**kwargs
):
+ """
+ :keyword source_field_name: Required. The name of the field in the data source.
+ :paramtype source_field_name: str
+ :keyword target_field_name: The name of the target field in the index. Same as the source field
+ name by default.
+ :paramtype target_field_name: str
+ :keyword mapping_function: A function to apply to each source field value before indexing.
+ :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction
+ """
super(FieldMapping, self).__init__(**kwargs)
self.source_field_name = source_field_name
self.target_field_name = target_field_name
@@ -2138,11 +2742,11 @@ class FieldMappingFunction(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the field mapping function.
- :paramtype name: str
- :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each
+ :ivar name: Required. The name of the field mapping function.
+ :vartype name: str
+ :ivar parameters: A dictionary of parameter name/value pairs to pass to the function. Each
value must be of a primitive type.
- :paramtype parameters: dict[str, any]
+ :vartype parameters: dict[str, any]
"""
_validation = {
@@ -2161,6 +2765,13 @@ def __init__(
parameters: Optional[Dict[str, Any]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the field mapping function.
+ :paramtype name: str
+ :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each
+ value must be of a primitive type.
+ :paramtype parameters: dict[str, any]
+ """
super(FieldMappingFunction, self).__init__(**kwargs)
self.name = name
self.parameters = parameters
@@ -2171,21 +2782,21 @@ class FreshnessScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the freshness scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
+ :ivar parameters: Required. Parameter values for the freshness scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
"""
_validation = {
@@ -2212,6 +2823,20 @@ def __init__(
interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the freshness scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters
+ """
super(FreshnessScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs)
self.type = 'freshness' # type: str
self.parameters = parameters
@@ -2222,9 +2847,9 @@ class FreshnessScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword boosting_duration: Required. The expiration period after which boosting will stop for
- a particular document.
- :paramtype boosting_duration: ~datetime.timedelta
+ :ivar boosting_duration: Required. The expiration period after which boosting will stop for a
+ particular document.
+ :vartype boosting_duration: ~datetime.timedelta
"""
_validation = {
@@ -2241,6 +2866,11 @@ def __init__(
boosting_duration: datetime.timedelta,
**kwargs
):
+ """
+ :keyword boosting_duration: Required. The expiration period after which boosting will stop for
+ a particular document.
+ :paramtype boosting_duration: ~datetime.timedelta
+ """
super(FreshnessScoringParameters, self).__init__(**kwargs)
self.boosting_duration = boosting_duration
@@ -2272,6 +2902,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(GetIndexStatisticsResult, self).__init__(**kwargs)
self.document_count = None
self.storage_size = None
@@ -2282,11 +2914,11 @@ class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data change detection
+ :ivar odata_type: Required. Identifies the concrete type of the data change detection
policy.Constant filled by server.
- :paramtype odata_type: str
- :keyword high_water_mark_column_name: Required. The name of the high water mark column.
- :paramtype high_water_mark_column_name: str
+ :vartype odata_type: str
+ :ivar high_water_mark_column_name: Required. The name of the high water mark column.
+ :vartype high_water_mark_column_name: str
"""
_validation = {
@@ -2305,6 +2937,10 @@ def __init__(
high_water_mark_column_name: str,
**kwargs
):
+ """
+ :keyword high_water_mark_column_name: Required. The name of the high water mark column.
+ :paramtype high_water_mark_column_name: str
+ """
super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' # type: str
self.high_water_mark_column_name = high_water_mark_column_name
@@ -2315,34 +2951,33 @@ class ImageAnalysisSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+    :ivar odata_type: Required. Identifies the concrete type of the skill. Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "en", "es", "ja", "pt", "zh".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage
- :keyword visual_features: A list of visual features.
- :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
- :keyword details: A string indicating which domain-specific details to return.
- :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
+ :ivar visual_features: A list of visual features.
+ :vartype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
+ :ivar details: A string indicating which domain-specific details to return.
+ :vartype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
"""
_validation = {
@@ -2376,6 +3011,33 @@ def __init__(
details: Optional[List[Union[str, "ImageDetail"]]] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "en", "es", "ja", "pt", "zh".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage
+ :keyword visual_features: A list of visual features.
+ :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature]
+ :keyword details: A string indicating which domain-specific details to return.
+ :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail]
+ """
super(ImageAnalysisSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' # type: str
self.default_language_code = default_language_code
@@ -2437,6 +3099,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexerCurrentState, self).__init__(**kwargs)
self.mode = None
self.all_docs_initial_change_tracking_state = None
@@ -2521,6 +3185,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(IndexerExecutionResult, self).__init__(**kwargs)
self.status = None
self.status_detail = None
@@ -2539,19 +3205,18 @@ def __init__(
class IndexingParameters(msrest.serialization.Model):
"""Represents parameters for indexer execution.
- :keyword batch_size: The number of items that are read from the data source and indexed as a
+ :ivar batch_size: The number of items that are read from the data source and indexed as a
single batch in order to improve performance. The default depends on the data source type.
- :paramtype batch_size: int
- :keyword max_failed_items: The maximum number of items that can fail indexing for indexer
+ :vartype batch_size: int
+ :ivar max_failed_items: The maximum number of items that can fail indexing for indexer
execution to still be considered successful. -1 means no limit. Default is 0.
- :paramtype max_failed_items: int
- :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can
- fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
- :paramtype max_failed_items_per_batch: int
- :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is
+ :vartype max_failed_items: int
+ :ivar max_failed_items_per_batch: The maximum number of items in a single batch that can fail
+ indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
+ :vartype max_failed_items_per_batch: int
+ :ivar configuration: A dictionary of indexer-specific configuration properties. Each name is
the name of a specific property. Each value must be of a primitive type.
- :paramtype configuration:
- ~azure.search.documents.indexes.models.IndexingParametersConfiguration
+ :vartype configuration: ~azure.search.documents.indexes.models.IndexingParametersConfiguration
"""
_attribute_map = {
@@ -2570,6 +3235,21 @@ def __init__(
configuration: Optional["IndexingParametersConfiguration"] = None,
**kwargs
):
+ """
+ :keyword batch_size: The number of items that are read from the data source and indexed as a
+ single batch in order to improve performance. The default depends on the data source type.
+ :paramtype batch_size: int
+ :keyword max_failed_items: The maximum number of items that can fail indexing for indexer
+ execution to still be considered successful. -1 means no limit. Default is 0.
+ :paramtype max_failed_items: int
+ :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can
+ fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
+ :paramtype max_failed_items_per_batch: int
+ :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is
+ the name of a specific property. Each value must be of a primitive type.
+ :paramtype configuration:
+ ~azure.search.documents.indexes.models.IndexingParametersConfiguration
+ """
super(IndexingParameters, self).__init__(**kwargs)
self.batch_size = batch_size
self.max_failed_items = max_failed_items
@@ -2580,73 +3260,73 @@ def __init__(
class IndexingParametersConfiguration(msrest.serialization.Model):
"""A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type.
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
- :paramtype additional_properties: dict[str, any]
- :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
+ :vartype additional_properties: dict[str, any]
+ :ivar parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines".
Default value: "default".
- :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
- :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore
- when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip
- over those files during indexing.
- :paramtype excluded_file_name_extensions: str
- :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select
- when processing from Azure blob storage. For example, you could focus indexing on specific
+ :vartype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
+ :ivar excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore when
+ processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over
+ those files during indexing.
+ :vartype excluded_file_name_extensions: str
+ :ivar indexed_file_name_extensions: Comma-delimited list of filename extensions to select when
+ processing from Azure blob storage. For example, you could focus indexing on specific
application files ".docx, .pptx, .msg" to specifically include those file types.
- :paramtype indexed_file_name_extensions: str
- :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to
- continue indexing when an unsupported content type is encountered, and you don't know all the
- content types (file extensions) in advance.
- :paramtype fail_on_unsupported_content_type: bool
- :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
+ :vartype indexed_file_name_extensions: str
+ :ivar fail_on_unsupported_content_type: For Azure blobs, set to false if you want to continue
+ indexing when an unsupported content type is encountered, and you don't know all the content
+ types (file extensions) in advance.
+ :vartype fail_on_unsupported_content_type: bool
+ :ivar fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
indexing if a document fails indexing.
- :paramtype fail_on_unprocessable_document: bool
- :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this
- property to true to still index storage metadata for blob content that is too large to process.
+ :vartype fail_on_unprocessable_document: bool
+ :ivar index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this property
+ to true to still index storage metadata for blob content that is too large to process.
Oversized blobs are treated as errors by default. For limits on blob size, see
https://docs.microsoft.com/azure/search/search-limits-quotas-capacity.
- :paramtype index_storage_metadata_only_for_oversized_documents: bool
- :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
+ :vartype index_storage_metadata_only_for_oversized_documents: bool
+ :ivar delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
headers, useful for mapping source fields to destination fields in an index.
- :paramtype delimited_text_headers: str
- :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
+ :vartype delimited_text_headers: str
+ :ivar delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
delimiter for CSV files where each line starts a new document (for example, "|").
- :paramtype delimited_text_delimiter: str
- :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line
- of each blob contains headers.
- :paramtype first_line_contains_headers: bool
- :keyword document_root: For JSON arrays, given a structured or semi-structured document, you
- can specify a path to the array using this property.
- :paramtype document_root: str
- :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the
+ :vartype delimited_text_delimiter: str
+ :ivar first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line of
+ each blob contains headers.
+ :vartype first_line_contains_headers: bool
+ :ivar document_root: For JSON arrays, given a structured or semi-structured document, you can
+ specify a path to the array using this property.
+ :vartype document_root: str
+ :ivar data_to_extract: Specifies the data to extract from Azure blob storage and tells the
indexer which data to extract from image content when "imageAction" is set to a value other
than "none". This applies to embedded image content in a .PDF or other application, or image
files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata",
"allMetadata", "contentAndMetadata". Default value: "contentAndMetadata".
- :paramtype data_to_extract: str or
+ :vartype data_to_extract: str or
~azure.search.documents.indexes.models.BlobIndexerDataToExtract
- :keyword image_action: Determines how to process embedded images and image files in Azure blob
+ :ivar image_action: Determines how to process embedded images and image files in Azure blob
storage. Setting the "imageAction" configuration to any value other than "none" requires that
a skillset also be attached to that indexer. Possible values include: "none",
"generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none".
- :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
- :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data
- that is an object representing the original file data downloaded from your blob data source.
- This allows you to pass the original file data to a custom skill for processing within the
+ :vartype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
+ :ivar allow_skillset_to_read_file_data: If true, will create a path //document//file_data that
+ is an object representing the original file data downloaded from your blob data source. This
+ allows you to pass the original file data to a custom skill for processing within the
enrichment pipeline, or to the Document Extraction skill.
- :paramtype allow_skillset_to_read_file_data: bool
- :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files
- in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
- :paramtype pdf_text_rotation_algorithm: str or
+ :vartype allow_skillset_to_read_file_data: bool
+ :ivar pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files in
+ Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
+ :vartype pdf_text_rotation_algorithm: str or
~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm
- :keyword execution_environment: Specifies the environment in which the indexer should execute.
+ :ivar execution_environment: Specifies the environment in which the indexer should execute.
Possible values include: "standard", "private". Default value: "standard".
- :paramtype execution_environment: str or
+ :vartype execution_environment: str or
~azure.search.documents.indexes.models.IndexerExecutionEnvironment
- :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL
- database data sources, specified in the format "hh:mm:ss".
- :paramtype query_timeout: str
+ :ivar query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL database
+ data sources, specified in the format "hh:mm:ss".
+ :vartype query_timeout: str
"""
_attribute_map = {
@@ -2691,6 +3371,75 @@ def __init__(
query_timeout: Optional[str] = "00:05:00",
**kwargs
):
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source.
+ Possible values include: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines".
+ Default value: "default".
+ :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode
+ :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore
+ when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip
+ over those files during indexing.
+ :paramtype excluded_file_name_extensions: str
+ :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select
+ when processing from Azure blob storage. For example, you could focus indexing on specific
+ application files ".docx, .pptx, .msg" to specifically include those file types.
+ :paramtype indexed_file_name_extensions: str
+ :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to
+ continue indexing when an unsupported content type is encountered, and you don't know all the
+ content types (file extensions) in advance.
+ :paramtype fail_on_unsupported_content_type: bool
+ :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue
+ indexing if a document fails indexing.
+ :paramtype fail_on_unprocessable_document: bool
+ :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this
+ property to true to still index storage metadata for blob content that is too large to process.
+ Oversized blobs are treated as errors by default. For limits on blob size, see
+ https://docs.microsoft.com/azure/search/search-limits-quotas-capacity.
+ :paramtype index_storage_metadata_only_for_oversized_documents: bool
+ :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column
+ headers, useful for mapping source fields to destination fields in an index.
+ :paramtype delimited_text_headers: str
+ :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character
+ delimiter for CSV files where each line starts a new document (for example, "|").
+ :paramtype delimited_text_delimiter: str
+ :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line
+ of each blob contains headers.
+ :paramtype first_line_contains_headers: bool
+ :keyword document_root: For JSON arrays, given a structured or semi-structured document, you
+ can specify a path to the array using this property.
+ :paramtype document_root: str
+ :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the
+ indexer which data to extract from image content when "imageAction" is set to a value other
+ than "none". This applies to embedded image content in a .PDF or other application, or image
+ files such as .jpg and .png, in Azure blobs. Possible values include: "storageMetadata",
+ "allMetadata", "contentAndMetadata". Default value: "contentAndMetadata".
+ :paramtype data_to_extract: str or
+ ~azure.search.documents.indexes.models.BlobIndexerDataToExtract
+ :keyword image_action: Determines how to process embedded images and image files in Azure blob
+ storage. Setting the "imageAction" configuration to any value other than "none" requires that
+ a skillset also be attached to that indexer. Possible values include: "none",
+ "generateNormalizedImages", "generateNormalizedImagePerPage". Default value: "none".
+ :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction
+ :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data
+ that is an object representing the original file data downloaded from your blob data source.
+ This allows you to pass the original file data to a custom skill for processing within the
+ enrichment pipeline, or to the Document Extraction skill.
+ :paramtype allow_skillset_to_read_file_data: bool
+ :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files
+ in Azure blob storage. Possible values include: "none", "detectAngles". Default value: "none".
+ :paramtype pdf_text_rotation_algorithm: str or
+ ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm
+ :keyword execution_environment: Specifies the environment in which the indexer should execute.
+ Possible values include: "standard", "private". Default value: "standard".
+ :paramtype execution_environment: str or
+ ~azure.search.documents.indexes.models.IndexerExecutionEnvironment
+ :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL
+ database data sources, specified in the format "hh:mm:ss".
+ :paramtype query_timeout: str
+ """
super(IndexingParametersConfiguration, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.parsing_mode = parsing_mode
@@ -2716,10 +3465,10 @@ class IndexingSchedule(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword interval: Required. The interval of time between indexer executions.
- :paramtype interval: ~datetime.timedelta
- :keyword start_time: The time when an indexer should start running.
- :paramtype start_time: ~datetime.datetime
+ :ivar interval: Required. The interval of time between indexer executions.
+ :vartype interval: ~datetime.timedelta
+ :ivar start_time: The time when an indexer should start running.
+ :vartype start_time: ~datetime.datetime
"""
_validation = {
@@ -2738,6 +3487,12 @@ def __init__(
start_time: Optional[datetime.datetime] = None,
**kwargs
):
+ """
+ :keyword interval: Required. The interval of time between indexer executions.
+ :paramtype interval: ~datetime.timedelta
+ :keyword start_time: The time when an indexer should start running.
+ :paramtype start_time: ~datetime.datetime
+ """
super(IndexingSchedule, self).__init__(**kwargs)
self.interval = interval
self.start_time = start_time
@@ -2748,14 +3503,14 @@ class InputFieldMappingEntry(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the input.
- :paramtype name: str
- :keyword source: The source of the input.
- :paramtype source: str
- :keyword source_context: The source context used for selecting recursive inputs.
- :paramtype source_context: str
- :keyword inputs: The recursive inputs used when creating a complex type.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar name: Required. The name of the input.
+ :vartype name: str
+ :ivar source: The source of the input.
+ :vartype source: str
+ :ivar source_context: The source context used for selecting recursive inputs.
+ :vartype source_context: str
+ :ivar inputs: The recursive inputs used when creating a complex type.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
_validation = {
@@ -2778,6 +3533,16 @@ def __init__(
inputs: Optional[List["InputFieldMappingEntry"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the input.
+ :paramtype name: str
+ :keyword source: The source of the input.
+ :paramtype source: str
+ :keyword source_context: The source context used for selecting recursive inputs.
+ :paramtype source_context: str
+ :keyword inputs: The recursive inputs used when creating a complex type.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ """
super(InputFieldMappingEntry, self).__init__(**kwargs)
self.name = name
self.source = source
@@ -2790,18 +3555,18 @@ class KeepTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword keep_words: Required. The list of words to keep.
- :paramtype keep_words: list[str]
- :keyword lower_case_keep_words: A value indicating whether to lower case all words first.
- Default is false.
- :paramtype lower_case_keep_words: bool
+ :vartype name: str
+ :ivar keep_words: Required. The list of words to keep.
+ :vartype keep_words: list[str]
+ :ivar lower_case_keep_words: A value indicating whether to lower case all words first. Default
+ is false.
+ :vartype lower_case_keep_words: bool
"""
_validation = {
@@ -2825,6 +3590,17 @@ def __init__(
lower_case_keep_words: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword keep_words: Required. The list of words to keep.
+ :paramtype keep_words: list[str]
+ :keyword lower_case_keep_words: A value indicating whether to lower case all words first.
+ Default is false.
+ :paramtype lower_case_keep_words: bool
+ """
super(KeepTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' # type: str
self.keep_words = keep_words
@@ -2836,38 +3612,37 @@ class KeyPhraseExtractionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl",
"pt-PT", "pt-BR", "ru", "es", "sv".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage
- :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent,
- all identified key phrases will be returned.
- :paramtype max_key_phrase_count: int
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :ivar max_key_phrase_count: A number indicating how many key phrases to return. If absent, all
+ identified key phrases will be returned.
+ :vartype max_key_phrase_count: int
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -2901,6 +3676,37 @@ def __init__(
model_version: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl",
+ "pt-PT", "pt-BR", "ru", "es", "sv".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage
+ :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent,
+ all identified key phrases will be returned.
+ :paramtype max_key_phrase_count: int
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(KeyPhraseExtractionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' # type: str
self.default_language_code = default_language_code
@@ -2913,18 +3719,18 @@ class KeywordMarkerTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword keywords: Required. A list of words to mark as keywords.
- :paramtype keywords: list[str]
- :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
- converted to lower case first. Default is false.
- :paramtype ignore_case: bool
+ :vartype name: str
+ :ivar keywords: Required. A list of words to mark as keywords.
+ :vartype keywords: list[str]
+ :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted
+ to lower case first. Default is false.
+ :vartype ignore_case: bool
"""
_validation = {
@@ -2948,6 +3754,17 @@ def __init__(
ignore_case: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword keywords: Required. A list of words to mark as keywords.
+ :paramtype keywords: list[str]
+ :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
+ converted to lower case first. Default is false.
+ :paramtype ignore_case: bool
+ """
super(KeywordMarkerTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' # type: str
self.keywords = keywords
@@ -2959,15 +3776,15 @@ class KeywordTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword buffer_size: The read buffer size in bytes. Default is 256.
- :paramtype buffer_size: int
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar buffer_size: The read buffer size in bytes. Default is 256.
+ :vartype buffer_size: int
"""
_validation = {
@@ -2988,6 +3805,14 @@ def __init__(
buffer_size: Optional[int] = 256,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword buffer_size: The read buffer size in bytes. Default is 256.
+ :paramtype buffer_size: int
+ """
super(KeywordTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' # type: str
self.buffer_size = buffer_size
@@ -2998,16 +3823,16 @@ class KeywordTokenizerV2(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 256. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -3029,6 +3854,15 @@ def __init__(
max_token_length: Optional[int] = 256,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(KeywordTokenizerV2, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' # type: str
self.max_token_length = max_token_length
@@ -3039,33 +3873,32 @@ class LanguageDetectionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_country_hint: A country code to use as a hint to the language detection model
- if it cannot disambiguate the language.
- :paramtype default_country_hint: str
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_country_hint: A country code to use as a hint to the language detection model if
+ it cannot disambiguate the language.
+ :vartype default_country_hint: str
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -3097,6 +3930,32 @@ def __init__(
model_version: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_country_hint: A country code to use as a hint to the language detection model
+ if it cannot disambiguate the language.
+ :paramtype default_country_hint: str
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(LanguageDetectionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' # type: str
self.default_country_hint = default_country_hint
@@ -3108,18 +3967,18 @@ class LengthTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be
- less than the value of max.
- :paramtype min_length: int
- :keyword max_length: The maximum length in characters. Default and maximum is 300.
- :paramtype max_length: int
+ :vartype name: str
+ :ivar min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less
+ than the value of max.
+ :vartype min_length: int
+ :ivar max_length: The maximum length in characters. Default and maximum is 300.
+ :vartype max_length: int
"""
_validation = {
@@ -3144,6 +4003,17 @@ def __init__(
max_length: Optional[int] = 300,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be
+ less than the value of max.
+ :paramtype min_length: int
+ :keyword max_length: The maximum length in characters. Default and maximum is 300.
+ :paramtype max_length: int
+ """
super(LengthTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' # type: str
self.min_length = min_length
@@ -3155,18 +4025,18 @@ class LimitTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword max_token_count: The maximum number of tokens to produce. Default is 1.
- :paramtype max_token_count: int
- :keyword consume_all_tokens: A value indicating whether all tokens from the input must be
- consumed even if maxTokenCount is reached. Default is false.
- :paramtype consume_all_tokens: bool
+ :vartype name: str
+ :ivar max_token_count: The maximum number of tokens to produce. Default is 1.
+ :vartype max_token_count: int
+ :ivar consume_all_tokens: A value indicating whether all tokens from the input must be consumed
+ even if maxTokenCount is reached. Default is false.
+ :vartype consume_all_tokens: bool
"""
_validation = {
@@ -3189,6 +4059,17 @@ def __init__(
consume_all_tokens: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_count: The maximum number of tokens to produce. Default is 1.
+ :paramtype max_token_count: int
+ :keyword consume_all_tokens: A value indicating whether all tokens from the input must be
+ consumed even if maxTokenCount is reached. Default is false.
+ :paramtype consume_all_tokens: bool
+ """
super(LimitTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' # type: str
self.max_token_count = max_token_count
@@ -3218,6 +4099,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListDataSourcesResult, self).__init__(**kwargs)
self.data_sources = None
@@ -3245,6 +4128,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListIndexersResult, self).__init__(**kwargs)
self.indexers = None
@@ -3272,6 +4157,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListIndexesResult, self).__init__(**kwargs)
self.indexes = None
@@ -3299,6 +4186,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListSkillsetsResult, self).__init__(**kwargs)
self.skillsets = None
@@ -3326,6 +4215,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(ListSynonymMapsResult, self).__init__(**kwargs)
self.synonym_maps = None
@@ -3335,18 +4226,18 @@ class LuceneStandardAnalyzer(LexicalAnalyzer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
- :keyword stopwords: A list of stopwords.
- :paramtype stopwords: list[str]
+ :vartype max_token_length: int
+ :ivar stopwords: A list of stopwords.
+ :vartype stopwords: list[str]
"""
_validation = {
@@ -3370,6 +4261,17 @@ def __init__(
stopwords: Optional[List[str]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ :keyword stopwords: A list of stopwords.
+ :paramtype stopwords: list[str]
+ """
super(LuceneStandardAnalyzer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' # type: str
self.max_token_length = max_token_length
@@ -3381,16 +4283,16 @@ class LuceneStandardTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -3411,6 +4313,15 @@ def __init__(
max_token_length: Optional[int] = 255,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split.
+ :paramtype max_token_length: int
+ """
super(LuceneStandardTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' # type: str
self.max_token_length = max_token_length
@@ -3421,16 +4332,16 @@ class LuceneStandardTokenizerV2(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -3452,6 +4363,15 @@ def __init__(
max_token_length: Optional[int] = 255,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(LuceneStandardTokenizerV2, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' # type: str
self.max_token_length = max_token_length
@@ -3462,21 +4382,21 @@ class MagnitudeScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the magnitude scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
+ :ivar parameters: Required. Parameter values for the magnitude scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
"""
_validation = {
@@ -3503,6 +4423,20 @@ def __init__(
interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the magnitude scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters
+ """
super(MagnitudeScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs)
self.type = 'magnitude' # type: str
self.parameters = parameters
@@ -3513,13 +4447,13 @@ class MagnitudeScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword boosting_range_start: Required. The field value at which boosting starts.
- :paramtype boosting_range_start: float
- :keyword boosting_range_end: Required. The field value at which boosting ends.
- :paramtype boosting_range_end: float
- :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
+ :ivar boosting_range_start: Required. The field value at which boosting starts.
+ :vartype boosting_range_start: float
+ :ivar boosting_range_end: Required. The field value at which boosting ends.
+ :vartype boosting_range_end: float
+ :ivar should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
boost for field values beyond the range end value; default is false.
- :paramtype should_boost_beyond_range_by_constant: bool
+ :vartype should_boost_beyond_range_by_constant: bool
"""
_validation = {
@@ -3541,6 +4475,15 @@ def __init__(
should_boost_beyond_range_by_constant: Optional[bool] = None,
**kwargs
):
+ """
+ :keyword boosting_range_start: Required. The field value at which boosting starts.
+ :paramtype boosting_range_start: float
+ :keyword boosting_range_end: Required. The field value at which boosting ends.
+ :paramtype boosting_range_end: float
+ :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
+ boost for field values beyond the range end value; default is false.
+ :paramtype should_boost_beyond_range_by_constant: bool
+ """
super(MagnitudeScoringParameters, self).__init__(**kwargs)
self.boosting_range_start = boosting_range_start
self.boosting_range_end = boosting_range_end
@@ -3552,16 +4495,16 @@ class MappingCharFilter(CharFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all
- occurrences of the character "a" will be replaced with character "b").
- :paramtype mappings: list[str]
+ :vartype name: str
+ :ivar mappings: Required. A list of mappings of the following format: "a=>b" (all occurrences
+ of the character "a" will be replaced with character "b").
+ :vartype mappings: list[str]
"""
_validation = {
@@ -3583,6 +4526,15 @@ def __init__(
mappings: List[str],
**kwargs
):
+ """
+ :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword mappings: Required. A list of mappings of the following format: "a=>b" (all
+ occurrences of the character "a" will be replaced with character "b").
+ :paramtype mappings: list[str]
+ """
super(MappingCharFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' # type: str
self.mappings = mappings
@@ -3593,32 +4545,31 @@ class MergeSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is
- an empty space.
- :paramtype insert_pre_tag: str
- :keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is
- an empty space.
- :paramtype insert_post_tag: str
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an
+ empty space.
+ :vartype insert_pre_tag: str
+ :ivar insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an
+ empty space.
+ :vartype insert_post_tag: str
"""
_validation = {
@@ -3650,6 +4601,31 @@ def __init__(
insert_post_tag: Optional[str] = " ",
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is
+ an empty space.
+ :paramtype insert_pre_tag: str
+ :keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is
+ an empty space.
+ :paramtype insert_post_tag: str
+ """
super(MergeSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.MergeSkill' # type: str
self.insert_pre_tag = insert_pre_tag
@@ -3661,29 +4637,29 @@ class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
- :paramtype max_token_length: int
- :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
- as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
- :paramtype is_search_tokenizer: bool
- :keyword language: The language to use. The default is English. Possible values include:
- "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
+ :vartype max_token_length: int
+ :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as
+ the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :vartype is_search_tokenizer: bool
+ :ivar language: The language to use. The default is English. Possible values include: "arabic",
+ "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
"estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian",
"icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam",
"marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
"romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish",
"swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu".
- :paramtype language: str or
+ :vartype language: str or
~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage
"""
@@ -3710,6 +4686,29 @@ def __init__(
language: Optional[Union[str, "MicrosoftStemmingTokenizerLanguage"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ split. Maximum token length that can be used is 300 characters. Tokens longer than 300
+ characters are first split into tokens of length 300 and then each of those tokens is split
+ based on the max token length set. Default is 255.
+ :paramtype max_token_length: int
+ :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
+ as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :paramtype is_search_tokenizer: bool
+ :keyword language: The language to use. The default is English. Possible values include:
+ "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english",
+ "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian",
+ "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam",
+ "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
+ "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish",
+ "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu".
+ :paramtype language: str or
+ ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage
+ """
super(MicrosoftLanguageStemmingTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' # type: str
self.max_token_length = max_token_length
@@ -3722,29 +4721,29 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are
split. Maximum token length that can be used is 300 characters. Tokens longer than 300
characters are first split into tokens of length 300 and then each of those tokens is split
based on the max token length set. Default is 255.
- :paramtype max_token_length: int
- :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
- as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
- :paramtype is_search_tokenizer: bool
- :keyword language: The language to use. The default is English. Possible values include:
- "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian",
- "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi",
- "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam",
- "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
- "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish",
- "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese".
- :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
+ :vartype max_token_length: int
+ :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as
+ the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :vartype is_search_tokenizer: bool
+ :ivar language: The language to use. The default is English. Possible values include: "bangla",
+ "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech",
+ "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic",
+ "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi",
+ "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian",
+ "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil",
+ "telugu", "thai", "ukrainian", "urdu", "vietnamese".
+ :vartype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
"""
_validation = {
@@ -3770,6 +4769,28 @@ def __init__(
language: Optional[Union[str, "MicrosoftTokenizerLanguage"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are
+ split. Maximum token length that can be used is 300 characters. Tokens longer than 300
+ characters are first split into tokens of length 300 and then each of those tokens is split
+ based on the max token length set. Default is 255.
+ :paramtype max_token_length: int
+ :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
+ as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
+ :paramtype is_search_tokenizer: bool
+ :keyword language: The language to use. The default is English. Possible values include:
+ "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian",
+ "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi",
+ "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam",
+ "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi",
+ "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish",
+ "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese".
+ :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage
+ """
super(MicrosoftLanguageTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' # type: str
self.max_token_length = max_token_length
@@ -3782,18 +4803,18 @@ class NGramTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2.
- :paramtype max_gram: int
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2.
+ :vartype max_gram: int
"""
_validation = {
@@ -3816,6 +4837,17 @@ def __init__(
max_gram: Optional[int] = 2,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
+ maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2.
+ :paramtype max_gram: int
+ """
super(NGramTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' # type: str
self.min_gram = min_gram
@@ -3827,18 +4859,18 @@ class NGramTokenFilterV2(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
"""
_validation = {
@@ -3863,6 +4895,17 @@ def __init__(
max_gram: Optional[int] = 2,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ """
super(NGramTokenFilterV2, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' # type: str
self.min_gram = min_gram
@@ -3874,20 +4917,20 @@ class NGramTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
- the value of maxGram.
- :paramtype min_gram: int
- :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
- :paramtype max_gram: int
- :keyword token_chars: Character classes to keep in the tokens.
- :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the
+ value of maxGram.
+ :vartype min_gram: int
+ :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :vartype max_gram: int
+ :ivar token_chars: Character classes to keep in the tokens.
+ :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
"""
_validation = {
@@ -3914,6 +4957,19 @@ def __init__(
token_chars: Optional[List[Union[str, "TokenCharacterKind"]]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than
+ the value of maxGram.
+ :paramtype min_gram: int
+ :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300.
+ :paramtype max_gram: int
+ :keyword token_chars: Character classes to keep in the tokens.
+ :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind]
+ """
super(NGramTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' # type: str
self.min_gram = min_gram
@@ -3926,39 +4982,37 @@ class OcrSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el",
"hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl",
"sr-Latn", "sk".
- :paramtype default_language_code: str or
- ~azure.search.documents.indexes.models.OcrSkillLanguage
- :keyword should_detect_orientation: A value indicating to turn orientation detection on or not.
+ :vartype default_language_code: str or ~azure.search.documents.indexes.models.OcrSkillLanguage
+ :ivar should_detect_orientation: A value indicating to turn orientation detection on or not.
Default is false.
- :paramtype should_detect_orientation: bool
- :keyword line_ending: Defines the sequence of characters to use between the lines of text
+ :vartype should_detect_orientation: bool
+ :ivar line_ending: Defines the sequence of characters to use between the lines of text
recognized by the OCR skill. The default value is "space". Possible values include: "space",
"carriageReturn", "lineFeed", "carriageReturnLineFeed".
- :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
+ :vartype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
"""
_validation = {
@@ -3992,6 +5046,38 @@ def __init__(
line_ending: Optional[Union[str, "LineEnding"]] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el",
+ "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl",
+ "sr-Latn", "sk".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.OcrSkillLanguage
+ :keyword should_detect_orientation: A value indicating to turn orientation detection on or not.
+ Default is false.
+ :paramtype should_detect_orientation: bool
+ :keyword line_ending: Defines the sequence of characters to use between the lines of text
+ recognized by the OCR skill. The default value is "space". Possible values include: "space",
+ "carriageReturn", "lineFeed", "carriageReturnLineFeed".
+ :paramtype line_ending: str or ~azure.search.documents.indexes.models.LineEnding
+ """
super(OcrSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' # type: str
self.default_language_code = default_language_code
@@ -4004,10 +5090,10 @@ class OutputFieldMappingEntry(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the output defined by the skill.
- :paramtype name: str
- :keyword target_name: The target name of the output. It is optional and default to name.
- :paramtype target_name: str
+ :ivar name: Required. The name of the output defined by the skill.
+ :vartype name: str
+ :ivar target_name: The target name of the output. It is optional and defaults to name.
+ :vartype target_name: str
"""
_validation = {
@@ -4026,6 +5112,12 @@ def __init__(
target_name: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the output defined by the skill.
+ :paramtype name: str
+ :keyword target_name: The target name of the output. It is optional and defaults to name.
+ :paramtype target_name: str
+ """
super(OutputFieldMappingEntry, self).__init__(**kwargs)
self.name = name
self.target_name = target_name
@@ -4036,24 +5128,24 @@ class PathHierarchyTokenizerV2(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword delimiter: The delimiter character to use. Default is "/".
- :paramtype delimiter: str
- :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/".
- :paramtype replacement: str
- :keyword max_token_length: The maximum token length. Default and maximum is 300.
- :paramtype max_token_length: int
- :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar delimiter: The delimiter character to use. Default is "/".
+ :vartype delimiter: str
+ :ivar replacement: A value that, if set, replaces the delimiter character. Default is "/".
+ :vartype replacement: str
+ :ivar max_token_length: The maximum token length. Default and maximum is 300.
+ :vartype max_token_length: int
+ :ivar reverse_token_order: A value indicating whether to generate tokens in reverse order.
Default is false.
- :paramtype reverse_token_order: bool
- :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
- :paramtype number_of_tokens_to_skip: int
+ :vartype reverse_token_order: bool
+ :ivar number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
+ :vartype number_of_tokens_to_skip: int
"""
_validation = {
@@ -4083,6 +5175,23 @@ def __init__(
number_of_tokens_to_skip: Optional[int] = 0,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword delimiter: The delimiter character to use. Default is "/".
+ :paramtype delimiter: str
+ :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/".
+ :paramtype replacement: str
+ :keyword max_token_length: The maximum token length. Default and maximum is 300.
+ :paramtype max_token_length: int
+ :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order.
+ Default is false.
+ :paramtype reverse_token_order: bool
+ :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0.
+ :paramtype number_of_tokens_to_skip: int
+ """
super(PathHierarchyTokenizerV2, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' # type: str
self.delimiter = delimiter
@@ -4092,55 +5201,29 @@ def __init__(
self.number_of_tokens_to_skip = number_of_tokens_to_skip
-class Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
- """Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema.
-
- :keyword document_keys: document keys to be reset.
- :paramtype document_keys: list[str]
- :keyword datasource_document_ids: datasource document identifiers to be reset.
- :paramtype datasource_document_ids: list[str]
- """
-
- _attribute_map = {
- 'document_keys': {'key': 'documentKeys', 'type': '[str]'},
- 'datasource_document_ids': {'key': 'datasourceDocumentIds', 'type': '[str]'},
- }
-
- def __init__(
- self,
- *,
- document_keys: Optional[List[str]] = None,
- datasource_document_ids: Optional[List[str]] = None,
- **kwargs
- ):
- super(Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
- self.document_keys = document_keys
- self.datasource_document_ids = datasource_document_ids
-
-
class PatternAnalyzer(LexicalAnalyzer):
"""Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is
true.
- :paramtype lower_case_terms: bool
- :keyword pattern: A regular expression pattern to match token separators. Default is an
- expression that matches one or more non-word characters.
- :paramtype pattern: str
- :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
- "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
- :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
- :keyword stopwords: A list of stopwords.
- :paramtype stopwords: list[str]
+ :vartype lower_case_terms: bool
+ :ivar pattern: A regular expression pattern to match token separators. Default is an expression
+ that matches one or more non-word characters.
+ :vartype pattern: str
+ :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE",
+ "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :ivar stopwords: A list of stopwords.
+ :vartype stopwords: list[str]
"""
_validation = {
@@ -4167,6 +5250,23 @@ def __init__(
stopwords: Optional[List[str]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is
+ true.
+ :paramtype lower_case_terms: bool
+ :keyword pattern: A regular expression pattern to match token separators. Default is an
+ expression that matches one or more non-word characters.
+ :paramtype pattern: str
+ :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
+ "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :keyword stopwords: A list of stopwords.
+ :paramtype stopwords: list[str]
+ """
super(PatternAnalyzer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' # type: str
self.lower_case_terms = lower_case_terms
@@ -4180,18 +5280,18 @@ class PatternCaptureTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword patterns: Required. A list of patterns to match against each token.
- :paramtype patterns: list[str]
- :keyword preserve_original: A value indicating whether to return the original token even if one
- of the patterns matches. Default is true.
- :paramtype preserve_original: bool
+ :vartype name: str
+ :ivar patterns: Required. A list of patterns to match against each token.
+ :vartype patterns: list[str]
+ :ivar preserve_original: A value indicating whether to return the original token even if one of
+ the patterns matches. Default is true.
+ :vartype preserve_original: bool
"""
_validation = {
@@ -4215,6 +5315,17 @@ def __init__(
preserve_original: Optional[bool] = True,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword patterns: Required. A list of patterns to match against each token.
+ :paramtype patterns: list[str]
+ :keyword preserve_original: A value indicating whether to return the original token even if one
+ of the patterns matches. Default is true.
+ :paramtype preserve_original: bool
+ """
super(PatternCaptureTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' # type: str
self.patterns = patterns
@@ -4226,17 +5337,17 @@ class PatternReplaceCharFilter(CharFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the char filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the char filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword pattern: Required. A regular expression pattern.
- :paramtype pattern: str
- :keyword replacement: Required. The replacement text.
- :paramtype replacement: str
+ :vartype name: str
+ :ivar pattern: Required. A regular expression pattern.
+ :vartype pattern: str
+ :ivar replacement: Required. The replacement text.
+ :vartype replacement: str
"""
_validation = {
@@ -4261,6 +5372,16 @@ def __init__(
replacement: str,
**kwargs
):
+ """
+ :keyword name: Required. The name of the char filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword pattern: Required. A regular expression pattern.
+ :paramtype pattern: str
+ :keyword replacement: Required. The replacement text.
+ :paramtype replacement: str
+ """
super(PatternReplaceCharFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' # type: str
self.pattern = pattern
@@ -4272,17 +5393,17 @@ class PatternReplaceTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword pattern: Required. A regular expression pattern.
- :paramtype pattern: str
- :keyword replacement: Required. The replacement text.
- :paramtype replacement: str
+ :vartype name: str
+ :ivar pattern: Required. A regular expression pattern.
+ :vartype pattern: str
+ :ivar replacement: Required. The replacement text.
+ :vartype replacement: str
"""
_validation = {
@@ -4307,6 +5428,16 @@ def __init__(
replacement: str,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword pattern: Required. A regular expression pattern.
+ :paramtype pattern: str
+ :keyword replacement: Required. The replacement text.
+ :paramtype replacement: str
+ """
super(PatternReplaceTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' # type: str
self.pattern = pattern
@@ -4318,23 +5449,23 @@ class PatternTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword pattern: A regular expression pattern to match token separators. Default is an
- expression that matches one or more non-word characters.
- :paramtype pattern: str
- :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
- "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
- :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
- :keyword group: The zero-based ordinal of the matching group in the regular expression pattern
- to extract into tokens. Use -1 if you want to use the entire pattern to split the input into
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar pattern: A regular expression pattern to match token separators. Default is an expression
+ that matches one or more non-word characters.
+ :vartype pattern: str
+ :ivar flags: Regular expression flags. Possible values include: "CANON_EQ", "CASE_INSENSITIVE",
+ "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :ivar group: The zero-based ordinal of the matching group in the regular expression pattern to
+ extract into tokens. Use -1 if you want to use the entire pattern to split the input into
tokens, irrespective of matching groups. Default is -1.
- :paramtype group: int
+ :vartype group: int
"""
_validation = {
@@ -4359,6 +5490,22 @@ def __init__(
group: Optional[int] = -1,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword pattern: A regular expression pattern to match token separators. Default is an
+ expression that matches one or more non-word characters.
+ :paramtype pattern: str
+ :keyword flags: Regular expression flags. Possible values include: "CANON_EQ",
+ "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+ :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags
+ :keyword group: The zero-based ordinal of the matching group in the regular expression pattern
+ to extract into tokens. Use -1 if you want to use the entire pattern to split the input into
+ tokens, irrespective of matching groups. Default is -1.
+ :paramtype group: int
+ """
super(PatternTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' # type: str
self.pattern = pattern
@@ -4371,20 +5518,20 @@ class PhoneticTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
+ :vartype name: str
+ :ivar encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
"metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2",
"cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse".
- :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
- :keyword replace_original_tokens: A value indicating whether encoded tokens should replace
+ :vartype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
+ :ivar replace_original_tokens: A value indicating whether encoded tokens should replace
original tokens. If false, encoded tokens are added as synonyms. Default is true.
- :paramtype replace_original_tokens: bool
+ :vartype replace_original_tokens: bool
"""
_validation = {
@@ -4407,6 +5554,19 @@ def __init__(
replace_original_tokens: Optional[bool] = True,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
+ "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2",
+ "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse".
+ :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder
+ :keyword replace_original_tokens: A value indicating whether encoded tokens should replace
+ original tokens. If false, encoded tokens are added as synonyms. Default is true.
+ :paramtype replace_original_tokens: bool
+ """
super(PhoneticTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' # type: str
self.encoder = encoder
@@ -4418,48 +5578,47 @@ class PIIDetectionSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
confidence score is greater than the value specified. If not set (default), or if explicitly
set to null, all entities will be included.
- :paramtype minimum_precision: float
- :keyword masking_mode: A parameter that provides various ways to mask the personal information
+ :vartype minimum_precision: float
+ :ivar masking_mode: A parameter that provides various ways to mask the personal information
detected in the input text. Default is 'none'. Possible values include: "none", "replace".
- :paramtype masking_mode: str or
+ :vartype masking_mode: str or
~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode
- :keyword masking_character: The character used to mask the text if the maskingMode parameter is
+ :ivar masking_character: The character used to mask the text if the maskingMode parameter is
set to replace. Default is '*'.
- :paramtype masking_character: str
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
- :keyword pii_categories: A list of PII entity categories that should be extracted and masked.
- :paramtype pii_categories: list[str]
- :keyword domain: If specified, will set the PII domain to include only a subset of the entity
+ :vartype masking_character: str
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
+ :ivar pii_categories: A list of PII entity categories that should be extracted and masked.
+ :vartype pii_categories: list[str]
+ :ivar domain: If specified, will set the PII domain to include only a subset of the entity
categories. Possible values include: 'phi', 'none'. Default is 'none'.
- :paramtype domain: str
+ :vartype domain: str
"""
_validation = {
@@ -4503,6 +5662,47 @@ def __init__(
domain: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+ :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose
+ confidence score is greater than the value specified. If not set (default), or if explicitly
+ set to null, all entities will be included.
+ :paramtype minimum_precision: float
+ :keyword masking_mode: A parameter that provides various ways to mask the personal information
+ detected in the input text. Default is 'none'. Possible values include: "none", "replace".
+ :paramtype masking_mode: str or
+ ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode
+ :keyword masking_character: The character used to mask the text if the maskingMode parameter is
+ set to replace. Default is '*'.
+ :paramtype masking_character: str
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ :keyword pii_categories: A list of PII entity categories that should be extracted and masked.
+ :paramtype pii_categories: list[str]
+ :keyword domain: If specified, will set the PII domain to include only a subset of the entity
+ categories. Possible values include: 'phi', 'none'. Default is 'none'.
+ :paramtype domain: str
+ """
super(PIIDetectionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.PIIDetectionSkill' # type: str
self.default_language_code = default_language_code
@@ -4517,8 +5717,8 @@ def __init__(
class RequestOptions(msrest.serialization.Model):
"""Parameter group.
- :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
- :paramtype x_ms_client_request_id: str
+ :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :vartype x_ms_client_request_id: str
"""
_attribute_map = {
@@ -4531,6 +5731,10 @@ def __init__(
x_ms_client_request_id: Optional[str] = None,
**kwargs
):
+ """
+ :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging.
+ :paramtype x_ms_client_request_id: str
+ """
super(RequestOptions, self).__init__(**kwargs)
self.x_ms_client_request_id = x_ms_client_request_id
@@ -4540,10 +5744,10 @@ class ResourceCounter(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword usage: Required. The resource usage amount.
- :paramtype usage: long
- :keyword quota: The resource amount quota.
- :paramtype quota: long
+ :ivar usage: Required. The resource usage amount.
+ :vartype usage: long
+ :ivar quota: The resource amount quota.
+ :vartype quota: long
"""
_validation = {
@@ -4562,6 +5766,12 @@ def __init__(
quota: Optional[int] = None,
**kwargs
):
+ """
+ :keyword usage: Required. The resource usage amount.
+ :paramtype usage: long
+ :keyword quota: The resource amount quota.
+ :paramtype quota: long
+ """
super(ResourceCounter, self).__init__(**kwargs)
self.usage = usage
self.quota = quota
@@ -4572,17 +5782,17 @@ class ScoringProfile(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the scoring profile.
- :paramtype name: str
- :keyword text_weights: Parameters that boost scoring based on text matches in certain index
+ :ivar name: Required. The name of the scoring profile.
+ :vartype name: str
+ :ivar text_weights: Parameters that boost scoring based on text matches in certain index
fields.
- :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights
- :keyword functions: The collection of functions that influence the scoring of documents.
- :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
- :keyword function_aggregation: A value indicating how the results of individual scoring
- functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions.
- Possible values include: "sum", "average", "minimum", "maximum", "firstMatching".
- :paramtype function_aggregation: str or
+ :vartype text_weights: ~azure.search.documents.indexes.models.TextWeights
+ :ivar functions: The collection of functions that influence the scoring of documents.
+ :vartype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
+ :ivar function_aggregation: A value indicating how the results of individual scoring functions
+ should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible
+ values include: "sum", "average", "minimum", "maximum", "firstMatching".
+ :vartype function_aggregation: str or
~azure.search.documents.indexes.models.ScoringFunctionAggregation
"""
@@ -4606,6 +5816,20 @@ def __init__(
function_aggregation: Optional[Union[str, "ScoringFunctionAggregation"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the scoring profile.
+ :paramtype name: str
+ :keyword text_weights: Parameters that boost scoring based on text matches in certain index
+ fields.
+ :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights
+ :keyword functions: The collection of functions that influence the scoring of documents.
+ :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction]
+ :keyword function_aggregation: A value indicating how the results of individual scoring
+ functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions.
+ Possible values include: "sum", "average", "minimum", "maximum", "firstMatching".
+ :paramtype function_aggregation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionAggregation
+ """
super(ScoringProfile, self).__init__(**kwargs)
self.name = name
self.text_weights = text_weights
@@ -4644,6 +5868,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchError, self).__init__(**kwargs)
self.code = None
self.message = None
@@ -4655,43 +5881,43 @@ class SearchField(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the field, which must be unique within the fields
- collection of the index or parent field.
- :paramtype name: str
- :keyword type: Required. The data type of the field. Possible values include: "Edm.String",
+ :ivar name: Required. The name of the field, which must be unique within the fields collection
+ of the index or parent field.
+ :vartype name: str
+ :ivar type: Required. The data type of the field. Possible values include: "Edm.String",
"Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset",
"Edm.GeographyPoint", "Edm.ComplexType".
- :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
- :keyword key: A value indicating whether the field uniquely identifies documents in the index.
+ :vartype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
+ :ivar key: A value indicating whether the field uniquely identifies documents in the index.
Exactly one top-level field in each index must be chosen as the key field and it must be of
type Edm.String. Key fields can be used to look up documents directly and update or delete
specific documents. Default is false for simple fields and null for complex fields.
- :paramtype key: bool
- :keyword retrievable: A value indicating whether the field can be returned in a search result.
- You can disable this option if you want to use a field (for example, margin) as a filter,
- sorting, or scoring mechanism but do not want the field to be visible to the end user. This
- property must be true for key fields, and it must be null for complex fields. This property can
- be changed on existing fields. Enabling this property does not cause any increase in index
- storage requirements. Default is true for simple fields and null for complex fields.
- :paramtype retrievable: bool
- :keyword searchable: A value indicating whether the field is full-text searchable. This means
- it will undergo analysis such as word-breaking during indexing. If you set a searchable field
- to a value like "sunny day", internally it will be split into the individual tokens "sunny" and
+ :vartype key: bool
+ :ivar retrievable: A value indicating whether the field can be returned in a search result. You
+ can disable this option if you want to use a field (for example, margin) as a filter, sorting,
+ or scoring mechanism but do not want the field to be visible to the end user. This property
+ must be true for key fields, and it must be null for complex fields. This property can be
+ changed on existing fields. Enabling this property does not cause any increase in index storage
+ requirements. Default is true for simple fields and null for complex fields.
+ :vartype retrievable: bool
+ :ivar searchable: A value indicating whether the field is full-text searchable. This means it
+ will undergo analysis such as word-breaking during indexing. If you set a searchable field to a
+ value like "sunny day", internally it will be split into the individual tokens "sunny" and
"day". This enables full-text searches for these terms. Fields of type Edm.String or
Collection(Edm.String) are searchable by default. This property must be false for simple fields
of other non-string data types, and it must be null for complex fields. Note: searchable fields
consume extra space in your index since Azure Cognitive Search will store an additional
tokenized version of the field value for full-text searches. If you want to save space in your
index and you don't need a field to be included in searches, set searchable to false.
- :paramtype searchable: bool
- :keyword filterable: A value indicating whether to enable the field to be referenced in $filter
+ :vartype searchable: bool
+ :ivar filterable: A value indicating whether to enable the field to be referenced in $filter
queries. filterable differs from searchable in how strings are handled. Fields of type
Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so
comparisons are for exact matches only. For example, if you set such a field f to "sunny day",
$filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property
must be null for complex fields. Default is true for simple fields and null for complex fields.
- :paramtype filterable: bool
- :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
+ :vartype filterable: bool
+ :ivar sortable: A value indicating whether to enable the field to be referenced in $orderby
expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
users will want to sort by fields in the documents. A simple field can be sortable only if it
is single-valued (it has a single value in the scope of the parent document). Simple collection
@@ -4701,15 +5927,15 @@ class SearchField(msrest.serialization.Model):
cannot be sortable and the sortable property must be null for such fields. The default for
sortable is true for single-valued simple fields, false for multi-valued simple fields, and
null for complex fields.
- :paramtype sortable: bool
- :keyword facetable: A value indicating whether to enable the field to be referenced in facet
+ :vartype sortable: bool
+ :ivar facetable: A value indicating whether to enable the field to be referenced in facet
queries. Typically used in a presentation of search results that includes hit count by category
(for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or
Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple
fields.
- :paramtype facetable: bool
- :keyword analyzer: The name of the analyzer to use for the field. This option can be used only
+ :vartype facetable: bool
+ :ivar analyzer: The name of the analyzer to use for the field. This option can be used only
with searchable fields and it can't be set together with either searchAnalyzer or
indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null
for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
@@ -4729,11 +5955,11 @@ class SearchField(msrest.serialization.Model):
"th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
"vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
"simple", "stop", "whitespace".
- :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword search_analyzer: The name of the analyzer used at search time for the field. This
- option can be used only with searchable fields. It must be set together with indexAnalyzer and
- it cannot be set together with the analyzer option. This property cannot be set to the name of
- a language analyzer; use the analyzer property instead if you need a language analyzer. This
+ :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar search_analyzer: The name of the analyzer used at search time for the field. This option
+ can be used only with searchable fields. It must be set together with indexAnalyzer and it
+ cannot be set together with the analyzer option. This property cannot be set to the name of a
+ language analyzer; use the analyzer property instead if you need a language analyzer. This
analyzer can be updated on an existing field. Must be null for complex fields. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
@@ -4752,12 +5978,12 @@ class SearchField(msrest.serialization.Model):
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
- :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This
- option can be used only with searchable fields. It must be set together with searchAnalyzer and
- it cannot be set together with the analyzer option. This property cannot be set to the name of
- a language analyzer; use the analyzer property instead if you need a language analyzer. Once
- the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
+ :vartype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar index_analyzer: The name of the analyzer used at indexing time for the field. This option
+ can be used only with searchable fields. It must be set together with searchAnalyzer and it
+ cannot be set together with the analyzer option. This property cannot be set to the name of a
+ language analyzer; use the analyzer property instead if you need a language analyzer. Once the
+ analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene",
"bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft",
"zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft",
@@ -4775,21 +6001,21 @@ class SearchField(msrest.serialization.Model):
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
- :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
- :keyword normalizer: The name of the normalizer to use for the field. This option can be used
- only with fields with filterable, sortable, or facetable enabled. Once the normalizer is
- chosen, it cannot be changed for the field. Must be null for complex fields. Possible values
- include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
- :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
- :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This
+ :vartype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :ivar normalizer: The name of the normalizer to use for the field. This option can be used only
+ with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it
+ cannot be changed for the field. Must be null for complex fields. Possible values include:
+ "asciifolding", "elision", "lowercase", "standard", "uppercase".
+ :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :ivar synonym_maps: A list of the names of synonym maps to associate with this field. This
option can be used only with searchable fields. Currently only one synonym map per field is
supported. Assigning a synonym map to a field ensures that query terms targeting that field are
expanded at query-time using the rules in the synonym map. This attribute can be changed on
existing fields. Must be null or an empty collection for complex fields.
- :paramtype synonym_maps: list[str]
- :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or
+ :vartype synonym_maps: list[str]
+ :ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or
Collection(Edm.ComplexType). Must be null or empty for simple fields.
- :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
+ :vartype fields: list[~azure.search.documents.indexes.models.SearchField]
"""
_validation = {
@@ -4833,6 +6059,143 @@ def __init__(
fields: Optional[List["SearchField"]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the field, which must be unique within the fields
+ collection of the index or parent field.
+ :paramtype name: str
+ :keyword type: Required. The data type of the field. Possible values include: "Edm.String",
+ "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset",
+ "Edm.GeographyPoint", "Edm.ComplexType".
+ :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
+ :keyword key: A value indicating whether the field uniquely identifies documents in the index.
+ Exactly one top-level field in each index must be chosen as the key field and it must be of
+ type Edm.String. Key fields can be used to look up documents directly and update or delete
+ specific documents. Default is false for simple fields and null for complex fields.
+ :paramtype key: bool
+ :keyword retrievable: A value indicating whether the field can be returned in a search result.
+ You can disable this option if you want to use a field (for example, margin) as a filter,
+ sorting, or scoring mechanism but do not want the field to be visible to the end user. This
+ property must be true for key fields, and it must be null for complex fields. This property can
+ be changed on existing fields. Enabling this property does not cause any increase in index
+ storage requirements. Default is true for simple fields and null for complex fields.
+ :paramtype retrievable: bool
+ :keyword searchable: A value indicating whether the field is full-text searchable. This means
+ it will undergo analysis such as word-breaking during indexing. If you set a searchable field
+ to a value like "sunny day", internally it will be split into the individual tokens "sunny" and
+ "day". This enables full-text searches for these terms. Fields of type Edm.String or
+ Collection(Edm.String) are searchable by default. This property must be false for simple fields
+ of other non-string data types, and it must be null for complex fields. Note: searchable fields
+ consume extra space in your index since Azure Cognitive Search will store an additional
+ tokenized version of the field value for full-text searches. If you want to save space in your
+ index and you don't need a field to be included in searches, set searchable to false.
+ :paramtype searchable: bool
+ :keyword filterable: A value indicating whether to enable the field to be referenced in $filter
+ queries. filterable differs from searchable in how strings are handled. Fields of type
+ Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so
+ comparisons are for exact matches only. For example, if you set such a field f to "sunny day",
+ $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property
+ must be null for complex fields. Default is true for simple fields and null for complex fields.
+ :paramtype filterable: bool
+ :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
+ expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
+ users will want to sort by fields in the documents. A simple field can be sortable only if it
+ is single-valued (it has a single value in the scope of the parent document). Simple collection
+ fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex
+ collections are also multi-valued, and therefore cannot be sortable. This is true whether it's
+ an immediate parent field, or an ancestor field, that's the complex collection. Complex fields
+ cannot be sortable and the sortable property must be null for such fields. The default for
+ sortable is true for single-valued simple fields, false for multi-valued simple fields, and
+ null for complex fields.
+ :paramtype sortable: bool
+ :keyword facetable: A value indicating whether to enable the field to be referenced in facet
+ queries. Typically used in a presentation of search results that includes hit count by category
+ (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
+ on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or
+ Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple
+ fields.
+ :paramtype facetable: bool
+ :keyword analyzer: The name of the analyzer to use for the field. This option can be used only
+ with searchable fields and it can't be set together with either searchAnalyzer or
+ indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null
+ for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
+ "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene",
+ "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft",
+ "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene",
+ "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft",
+ "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene",
+ "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene",
+ "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene",
+ "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft",
+ "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft",
+ "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene",
+ "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft",
+ "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft",
+ "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft",
+ "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
+ "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
+ "simple", "stop", "whitespace".
+ :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword search_analyzer: The name of the analyzer used at search time for the field. This
+ option can be used only with searchable fields. It must be set together with indexAnalyzer and
+ it cannot be set together with the analyzer option. This property cannot be set to the name of
+ a language analyzer; use the analyzer property instead if you need a language analyzer. This
+ analyzer can be updated on an existing field. Must be null for complex fields. Possible values
+ include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
+ "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene",
+ "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene",
+ "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene",
+ "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene",
+ "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft",
+ "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft",
+ "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene",
+ "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft",
+ "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene",
+ "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
+ "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
+ "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
+ "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
+ "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
+ "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
+ "whitespace".
+ :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This
+ option can be used only with searchable fields. It must be set together with searchAnalyzer and
+ it cannot be set together with the analyzer option. This property cannot be set to the name of
+ a language analyzer; use the analyzer property instead if you need a language analyzer. Once
+ the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
+ Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene",
+ "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft",
+ "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft",
+ "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft",
+ "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene",
+ "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft",
+ "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft",
+ "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft",
+ "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene",
+ "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene",
+ "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
+ "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene",
+ "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
+ "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
+ "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
+ "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
+ "whitespace".
+ :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
+ :keyword normalizer: The name of the normalizer to use for the field. This option can be used
+ only with fields with filterable, sortable, or facetable enabled. Once the normalizer is
+ chosen, it cannot be changed for the field. Must be null for complex fields. Possible values
+ include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
+ :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
+ :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This
+ option can be used only with searchable fields. Currently only one synonym map per field is
+ supported. Assigning a synonym map to a field ensures that query terms targeting that field are
+ expanded at query-time using the rules in the synonym map. This attribute can be changed on
+ existing fields. Must be null or an empty collection for complex fields.
+ :paramtype synonym_maps: list[str]
+ :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or
+ Collection(Edm.ComplexType). Must be null or empty for simple fields.
+ :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
+ """
super(SearchField, self).__init__(**kwargs)
self.name = name
self.type = type
@@ -4855,31 +6218,31 @@ class SearchIndex(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the index.
- :paramtype name: str
- :keyword fields: Required. The fields of the index.
- :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
- :keyword scoring_profiles: The scoring profiles for the index.
- :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
- :keyword default_scoring_profile: The name of the scoring profile to use if none is specified
- in the query. If this property is not set and no scoring profile is specified in the query,
- then default scoring (tf-idf) will be used.
- :paramtype default_scoring_profile: str
- :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
- :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions
- :keyword suggesters: The suggesters for the index.
- :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester]
- :keyword analyzers: The analyzers for the index.
- :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
- :keyword tokenizers: The tokenizers for the index.
- :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
- :keyword token_filters: The token filters for the index.
- :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
- :keyword char_filters: The character filters for the index.
- :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
- :keyword normalizers: The normalizers for the index.
- :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :ivar name: Required. The name of the index.
+ :vartype name: str
+ :ivar fields: Required. The fields of the index.
+ :vartype fields: list[~azure.search.documents.indexes.models.SearchField]
+ :ivar scoring_profiles: The scoring profiles for the index.
+ :vartype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
+ :ivar default_scoring_profile: The name of the scoring profile to use if none is specified in
+ the query. If this property is not set and no scoring profile is specified in the query, then
+ default scoring (tf-idf) will be used.
+ :vartype default_scoring_profile: str
+ :ivar cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
+ :vartype cors_options: ~azure.search.documents.indexes.models.CorsOptions
+ :ivar suggesters: The suggesters for the index.
+ :vartype suggesters: list[~azure.search.documents.indexes.models.Suggester]
+ :ivar analyzers: The analyzers for the index.
+ :vartype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
+ :ivar tokenizers: The tokenizers for the index.
+ :vartype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
+ :ivar token_filters: The token filters for the index.
+ :vartype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
+ :ivar char_filters: The character filters for the index.
+ :vartype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
+ :ivar normalizers: The normalizers for the index.
+ :vartype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
@@ -4887,14 +6250,14 @@ class SearchIndex(msrest.serialization.Model):
needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
- :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :ivar similarity: The type of similarity algorithm to be used when scoring and ranking the
documents matching a search query. The similarity algorithm can only be defined at index
creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity
algorithm is used.
- :paramtype similarity: ~azure.search.documents.indexes.models.Similarity
- :keyword e_tag: The ETag of the index.
- :paramtype e_tag: str
+ :vartype similarity: ~azure.search.documents.indexes.models.Similarity
+ :ivar e_tag: The ETag of the index.
+ :vartype e_tag: str
"""
_validation = {
@@ -4938,6 +6301,48 @@ def __init__(
e_tag: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the index.
+ :paramtype name: str
+ :keyword fields: Required. The fields of the index.
+ :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
+ :keyword scoring_profiles: The scoring profiles for the index.
+ :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
+ :keyword default_scoring_profile: The name of the scoring profile to use if none is specified
+ in the query. If this property is not set and no scoring profile is specified in the query,
+ then default scoring (tf-idf) will be used.
+ :paramtype default_scoring_profile: str
+ :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
+ :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions
+ :keyword suggesters: The suggesters for the index.
+ :paramtype suggesters: list[~azure.search.documents.indexes.models.Suggester]
+ :keyword analyzers: The analyzers for the index.
+ :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
+ :keyword tokenizers: The tokenizers for the index.
+ :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
+ :keyword token_filters: The token filters for the index.
+ :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
+ :keyword char_filters: The character filters for the index.
+ :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
+ :keyword normalizers: The normalizers for the index.
+ :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer]
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your data when you
+ want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
+ Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
+ Search will ignore attempts to set this property to null. You can change this property as
+ needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
+ customer-managed keys is not available for free search services, and is only available for paid
+ services created on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the
+ documents matching a search query. The similarity algorithm can only be defined at index
+ creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity
+ algorithm is used.
+ :paramtype similarity: ~azure.search.documents.indexes.models.Similarity
+ :keyword e_tag: The ETag of the index.
+ :paramtype e_tag: str
+ """
super(SearchIndex, self).__init__(**kwargs)
self.name = name
self.fields = fields
@@ -4960,32 +6365,32 @@ class SearchIndexer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the indexer.
- :paramtype name: str
- :keyword description: The description of the indexer.
- :paramtype description: str
- :keyword data_source_name: Required. The name of the datasource from which this indexer reads
+ :ivar name: Required. The name of the indexer.
+ :vartype name: str
+ :ivar description: The description of the indexer.
+ :vartype description: str
+ :ivar data_source_name: Required. The name of the datasource from which this indexer reads
data.
- :paramtype data_source_name: str
- :keyword skillset_name: The name of the skillset executing with this indexer.
- :paramtype skillset_name: str
- :keyword target_index_name: Required. The name of the index to which this indexer writes data.
- :paramtype target_index_name: str
- :keyword schedule: The schedule for this indexer.
- :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
- :keyword parameters: Parameters for indexer execution.
- :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters
- :keyword field_mappings: Defines mappings between fields in the data source and corresponding
+ :vartype data_source_name: str
+ :ivar skillset_name: The name of the skillset executing with this indexer.
+ :vartype skillset_name: str
+ :ivar target_index_name: Required. The name of the index to which this indexer writes data.
+ :vartype target_index_name: str
+ :ivar schedule: The schedule for this indexer.
+ :vartype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
+ :ivar parameters: Parameters for indexer execution.
+ :vartype parameters: ~azure.search.documents.indexes.models.IndexingParameters
+ :ivar field_mappings: Defines mappings between fields in the data source and corresponding
target fields in the index.
- :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
- :keyword output_field_mappings: Output field mappings are applied after enrichment and
- immediately before indexing.
- :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
- :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false.
- :paramtype is_disabled: bool
- :keyword e_tag: The ETag of the indexer.
- :paramtype e_tag: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :vartype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :ivar output_field_mappings: Output field mappings are applied after enrichment and immediately
+ before indexing.
+ :vartype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :ivar is_disabled: A value indicating whether the indexer is disabled. Default is false.
+ :vartype is_disabled: bool
+ :ivar e_tag: The ETag of the indexer.
+ :vartype e_tag: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your indexer
definition (as well as indexer execution status) when you want full assurance that no one, not
even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your
@@ -4994,10 +6399,10 @@ class SearchIndexer(msrest.serialization.Model):
rotate your encryption key; Your indexer definition (and indexer execution status) will be
unaffected. Encryption with customer-managed keys is not available for free search services,
and is only available for paid services created on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
- :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification
- steps without having to rebuild the index every time.
- :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification steps
+ without having to rebuild the index every time.
+ :vartype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
"""
_validation = {
@@ -5040,6 +6445,46 @@ def __init__(
cache: Optional["SearchIndexerCache"] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the indexer.
+ :paramtype name: str
+ :keyword description: The description of the indexer.
+ :paramtype description: str
+ :keyword data_source_name: Required. The name of the datasource from which this indexer reads
+ data.
+ :paramtype data_source_name: str
+ :keyword skillset_name: The name of the skillset executing with this indexer.
+ :paramtype skillset_name: str
+ :keyword target_index_name: Required. The name of the index to which this indexer writes data.
+ :paramtype target_index_name: str
+ :keyword schedule: The schedule for this indexer.
+ :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule
+ :keyword parameters: Parameters for indexer execution.
+ :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters
+ :keyword field_mappings: Defines mappings between fields in the data source and corresponding
+ target fields in the index.
+ :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :keyword output_field_mappings: Output field mappings are applied after enrichment and
+ immediately before indexing.
+ :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping]
+ :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false.
+ :paramtype is_disabled: bool
+ :keyword e_tag: The ETag of the indexer.
+ :paramtype e_tag: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your indexer
+ definition (as well as indexer execution status) when you want full assurance that no one, not
+ even Microsoft, can decrypt them in Azure Cognitive Search. Once you have encrypted your
+ indexer definition, it will always remain encrypted. Azure Cognitive Search will ignore
+ attempts to set this property to null. You can change this property as needed if you want to
+ rotate your encryption key; Your indexer definition (and indexer execution status) will be
+ unaffected. Encryption with customer-managed keys is not available for free search services,
+ and is only available for paid services created on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification
+ steps without having to rebuild the index every time.
+ :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache
+ """
super(SearchIndexer, self).__init__(**kwargs)
self.name = name
self.description = description
@@ -5059,11 +6504,11 @@ def __init__(
class SearchIndexerCache(msrest.serialization.Model):
"""SearchIndexerCache.
- :keyword storage_connection_string: The connection string to the storage account where the
- cache data will be persisted.
- :paramtype storage_connection_string: str
- :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled.
- :paramtype enable_reprocessing: bool
+ :ivar storage_connection_string: The connection string to the storage account where the cache
+ data will be persisted.
+ :vartype storage_connection_string: str
+ :ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled.
+ :vartype enable_reprocessing: bool
"""
_attribute_map = {
@@ -5078,6 +6523,13 @@ def __init__(
enable_reprocessing: Optional[bool] = None,
**kwargs
):
+ """
+ :keyword storage_connection_string: The connection string to the storage account where the
+ cache data will be persisted.
+ :paramtype storage_connection_string: str
+ :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled.
+ :paramtype enable_reprocessing: bool
+ """
super(SearchIndexerCache, self).__init__(**kwargs)
self.storage_connection_string = storage_connection_string
self.enable_reprocessing = enable_reprocessing
@@ -5088,12 +6540,12 @@ class SearchIndexerDataContainer(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the table or view (for Azure SQL data source) or
- collection (for CosmosDB data source) that will be indexed.
- :paramtype name: str
- :keyword query: A query that is applied to this data container. The syntax and meaning of this
+ :ivar name: Required. The name of the table or view (for Azure SQL data source) or collection
+ (for CosmosDB data source) that will be indexed.
+ :vartype name: str
+ :ivar query: A query that is applied to this data container. The syntax and meaning of this
parameter is datasource-specific. Not supported by Azure SQL datasources.
- :paramtype query: str
+ :vartype query: str
"""
_validation = {
@@ -5112,6 +6564,14 @@ def __init__(
query: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the table or view (for Azure SQL data source) or
+ collection (for CosmosDB data source) that will be indexed.
+ :paramtype name: str
+ :keyword query: A query that is applied to this data container. The syntax and meaning of this
+ parameter is datasource-specific. Not supported by Azure SQL datasources.
+ :paramtype query: str
+ """
super(SearchIndexerDataContainer, self).__init__(**kwargs)
self.name = name
self.query = query
@@ -5125,9 +6585,9 @@ class SearchIndexerDataIdentity(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by
server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -5146,6 +6606,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerDataIdentity, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
@@ -5155,9 +6617,9 @@ class SearchIndexerDataNoneIdentity(SearchIndexerDataIdentity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by
server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -5172,6 +6634,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerDataNoneIdentity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataNoneIdentity' # type: str
@@ -5181,31 +6645,31 @@ class SearchIndexerDataSource(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the datasource.
- :paramtype name: str
- :keyword description: The description of the datasource.
- :paramtype description: str
- :keyword type: Required. The type of the datasource. Possible values include: "azuresql",
+ :ivar name: Required. The name of the datasource.
+ :vartype name: str
+ :ivar description: The description of the datasource.
+ :vartype description: str
+ :ivar type: Required. The type of the datasource. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
- :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
- :keyword credentials: Required. Credentials for the datasource.
- :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
- :keyword container: Required. The data container for the datasource.
- :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
- :keyword identity: An explicit managed identity to use for this datasource. If not specified
- and the connection string is a managed identity, the system-assigned managed identity is used.
- If not specified, the value remains unchanged. If "none" is specified, the value of this
- property is cleared.
- :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
- :keyword data_change_detection_policy: The data change detection policy for the datasource.
- :paramtype data_change_detection_policy:
+ :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
+ :ivar credentials: Required. Credentials for the datasource.
+ :vartype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
+ :ivar container: Required. The data container for the datasource.
+ :vartype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
+ :ivar identity: An explicit managed identity to use for this datasource. If not specified and
+ the connection string is a managed identity, the system-assigned managed identity is used. If
+ not specified, the value remains unchanged. If "none" is specified, the value of this property
+ is cleared.
+ :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ :ivar data_change_detection_policy: The data change detection policy for the datasource.
+ :vartype data_change_detection_policy:
~azure.search.documents.indexes.models.DataChangeDetectionPolicy
- :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource.
- :paramtype data_deletion_detection_policy:
+ :ivar data_deletion_detection_policy: The data deletion detection policy for the datasource.
+ :vartype data_deletion_detection_policy:
~azure.search.documents.indexes.models.DataDeletionDetectionPolicy
- :keyword e_tag: The ETag of the data source.
- :paramtype e_tag: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :ivar e_tag: The ETag of the data source.
+ :vartype e_tag: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your datasource
definition when you want full assurance that no one, not even Microsoft, can decrypt your data
source definition in Azure Cognitive Search. Once you have encrypted your data source
@@ -5214,7 +6678,7 @@ class SearchIndexerDataSource(msrest.serialization.Model):
encryption key; Your datasource definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
_validation = {
@@ -5252,6 +6716,42 @@ def __init__(
encryption_key: Optional["SearchResourceEncryptionKey"] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the datasource.
+ :paramtype name: str
+ :keyword description: The description of the datasource.
+ :paramtype description: str
+ :keyword type: Required. The type of the datasource. Possible values include: "azuresql",
+ "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
+ :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
+ :keyword credentials: Required. Credentials for the datasource.
+ :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials
+ :keyword container: Required. The data container for the datasource.
+ :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
+ :keyword identity: An explicit managed identity to use for this datasource. If not specified
+ and the connection string is a managed identity, the system-assigned managed identity is used.
+ If not specified, the value remains unchanged. If "none" is specified, the value of this
+ property is cleared.
+ :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ :keyword data_change_detection_policy: The data change detection policy for the datasource.
+ :paramtype data_change_detection_policy:
+ ~azure.search.documents.indexes.models.DataChangeDetectionPolicy
+ :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource.
+ :paramtype data_deletion_detection_policy:
+ ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy
+ :keyword e_tag: The ETag of the data source.
+ :paramtype e_tag: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your datasource
+ definition when you want full assurance that no one, not even Microsoft, can decrypt your data
+ source definition in Azure Cognitive Search. Once you have encrypted your data source
+ definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
+ this property to null. You can change this property as needed if you want to rotate your
+ encryption key; Your datasource definition will be unaffected. Encryption with customer-managed
+ keys is not available for free search services, and is only available for paid services created
+ on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ """
super(SearchIndexerDataSource, self).__init__(**kwargs)
self.name = name
self.description = description
@@ -5270,14 +6770,14 @@ class SearchIndexerDataUserAssignedIdentity(SearchIndexerDataIdentity):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the identity.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the identity.Constant filled by
server.
- :paramtype odata_type: str
- :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user
+ :vartype odata_type: str
+ :ivar user_assigned_identity: Required. The fully qualified Azure resource Id of a user
assigned managed identity typically in the form
"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId"
that should have been assigned to the search service.
- :paramtype user_assigned_identity: str
+ :vartype user_assigned_identity: str
"""
_validation = {
@@ -5296,6 +6796,13 @@ def __init__(
user_assigned_identity: str,
**kwargs
):
+ """
+ :keyword user_assigned_identity: Required. The fully qualified Azure resource Id of a user
+ assigned managed identity typically in the form
+ "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId"
+ that should have been assigned to the search service.
+ :paramtype user_assigned_identity: str
+ """
super(SearchIndexerDataUserAssignedIdentity, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SearchIndexerDataUserAssignedIdentity' # type: str
self.user_assigned_identity = user_assigned_identity
@@ -5351,6 +6858,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerError, self).__init__(**kwargs)
self.key = None
self.error_message = None
@@ -5365,11 +6874,11 @@ class SearchIndexerKnowledgeStore(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword storage_connection_string: Required. The connection string to the storage account
+ :ivar storage_connection_string: Required. The connection string to the storage account
projections will be stored in.
- :paramtype storage_connection_string: str
- :keyword projections: Required. A list of additional projections to perform during indexing.
- :paramtype projections:
+ :vartype storage_connection_string: str
+ :ivar projections: Required. A list of additional projections to perform during indexing.
+ :vartype projections:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection]
"""
@@ -5390,6 +6899,14 @@ def __init__(
projections: List["SearchIndexerKnowledgeStoreProjection"],
**kwargs
):
+ """
+ :keyword storage_connection_string: Required. The connection string to the storage account
+ projections will be stored in.
+ :paramtype storage_connection_string: str
+ :keyword projections: Required. A list of additional projections to perform during indexing.
+ :paramtype projections:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection]
+ """
super(SearchIndexerKnowledgeStore, self).__init__(**kwargs)
self.storage_connection_string = storage_connection_string
self.projections = projections
@@ -5398,16 +6915,16 @@ def __init__(
class SearchIndexerKnowledgeStoreProjectionSelector(msrest.serialization.Model):
"""Abstract class to share properties between concrete selectors.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
"""
_attribute_map = {
@@ -5428,6 +6945,18 @@ def __init__(
inputs: Optional[List["InputFieldMappingEntry"]] = None,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ """
super(SearchIndexerKnowledgeStoreProjectionSelector, self).__init__(**kwargs)
self.reference_key_name = reference_key_name
self.generated_key_name = generated_key_name
@@ -5441,18 +6970,18 @@ class SearchIndexerKnowledgeStoreBlobProjectionSelector(SearchIndexerKnowledgeSt
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword storage_container: Required. Blob container to store projections in.
- :paramtype storage_container: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar storage_container: Required. Blob container to store projections in.
+ :vartype storage_container: str
"""
_validation = {
@@ -5479,6 +7008,20 @@ def __init__(
inputs: Optional[List["InputFieldMappingEntry"]] = None,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword storage_container: Required. Blob container to store projections in.
+ :paramtype storage_container: str
+ """
super(SearchIndexerKnowledgeStoreBlobProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, **kwargs)
self.storage_container = storage_container
@@ -5488,18 +7031,18 @@ class SearchIndexerKnowledgeStoreFileProjectionSelector(SearchIndexerKnowledgeSt
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword storage_container: Required. Blob container to store projections in.
- :paramtype storage_container: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar storage_container: Required. Blob container to store projections in.
+ :vartype storage_container: str
"""
_validation = {
@@ -5526,6 +7069,20 @@ def __init__(
inputs: Optional[List["InputFieldMappingEntry"]] = None,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword storage_container: Required. Blob container to store projections in.
+ :paramtype storage_container: str
+ """
super(SearchIndexerKnowledgeStoreFileProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, storage_container=storage_container, **kwargs)
@@ -5534,18 +7091,18 @@ class SearchIndexerKnowledgeStoreObjectProjectionSelector(SearchIndexerKnowledge
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword storage_container: Required. Blob container to store projections in.
- :paramtype storage_container: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar storage_container: Required. Blob container to store projections in.
+ :vartype storage_container: str
"""
_validation = {
@@ -5572,20 +7129,34 @@ def __init__(
inputs: Optional[List["InputFieldMappingEntry"]] = None,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword storage_container: Required. Blob container to store projections in.
+ :paramtype storage_container: str
+ """
super(SearchIndexerKnowledgeStoreObjectProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, storage_container=storage_container, **kwargs)
class SearchIndexerKnowledgeStoreProjection(msrest.serialization.Model):
"""Container object for various projection selectors.
- :keyword tables: Projections to Azure Table storage.
- :paramtype tables:
+ :ivar tables: Projections to Azure Table storage.
+ :vartype tables:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector]
- :keyword objects: Projections to Azure Blob storage.
- :paramtype objects:
+ :ivar objects: Projections to Azure Blob storage.
+ :vartype objects:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector]
- :keyword files: Projections to Azure File storage.
- :paramtype files:
+ :ivar files: Projections to Azure File storage.
+ :vartype files:
list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector]
"""
@@ -5603,6 +7174,17 @@ def __init__(
files: Optional[List["SearchIndexerKnowledgeStoreFileProjectionSelector"]] = None,
**kwargs
):
+ """
+ :keyword tables: Projections to Azure Table storage.
+ :paramtype tables:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector]
+ :keyword objects: Projections to Azure Blob storage.
+ :paramtype objects:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector]
+ :keyword files: Projections to Azure File storage.
+ :paramtype files:
+ list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector]
+ """
super(SearchIndexerKnowledgeStoreProjection, self).__init__(**kwargs)
self.tables = tables
self.objects = objects
@@ -5614,18 +7196,18 @@ class SearchIndexerKnowledgeStoreTableProjectionSelector(SearchIndexerKnowledgeS
All required parameters must be populated in order to send to Azure.
- :keyword reference_key_name: Name of reference key to different projection.
- :paramtype reference_key_name: str
- :keyword generated_key_name: Name of generated key to store projection under.
- :paramtype generated_key_name: str
- :keyword source: Source data to project.
- :paramtype source: str
- :keyword source_context: Source context for complex projections.
- :paramtype source_context: str
- :keyword inputs: Nested inputs for complex projections.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword table_name: Required. Name of the Azure table to store projected data in.
- :paramtype table_name: str
+ :ivar reference_key_name: Name of reference key to different projection.
+ :vartype reference_key_name: str
+ :ivar generated_key_name: Name of generated key to store projection under.
+ :vartype generated_key_name: str
+ :ivar source: Source data to project.
+ :vartype source: str
+ :ivar source_context: Source context for complex projections.
+ :vartype source_context: str
+ :ivar inputs: Nested inputs for complex projections.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar table_name: Required. Name of the Azure table to store projected data in.
+ :vartype table_name: str
"""
_validation = {
@@ -5652,6 +7234,20 @@ def __init__(
inputs: Optional[List["InputFieldMappingEntry"]] = None,
**kwargs
):
+ """
+ :keyword reference_key_name: Name of reference key to different projection.
+ :paramtype reference_key_name: str
+ :keyword generated_key_name: Name of generated key to store projection under.
+ :paramtype generated_key_name: str
+ :keyword source: Source data to project.
+ :paramtype source: str
+ :keyword source_context: Source context for complex projections.
+ :paramtype source_context: str
+ :keyword inputs: Nested inputs for complex projections.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword table_name: Required. Name of the Azure table to store projected data in.
+ :paramtype table_name: str
+ """
super(SearchIndexerKnowledgeStoreTableProjectionSelector, self).__init__(reference_key_name=reference_key_name, generated_key_name=generated_key_name, source=source, source_context=source_context, inputs=inputs, **kwargs)
self.table_name = table_name
@@ -5688,6 +7284,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerLimits, self).__init__(**kwargs)
self.max_run_time = None
self.max_document_extraction_size = None
@@ -5699,22 +7297,22 @@ class SearchIndexerSkillset(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the skillset.
- :paramtype name: str
- :keyword description: The description of the skillset.
- :paramtype description: str
- :keyword skills: Required. A list of skills in the skillset.
- :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
- :keyword cognitive_services_account: Details about cognitive services to be used when running
+ :ivar name: Required. The name of the skillset.
+ :vartype name: str
+ :ivar description: The description of the skillset.
+ :vartype description: str
+ :ivar skills: Required. A list of skills in the skillset.
+ :vartype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
+ :ivar cognitive_services_account: Details about cognitive services to be used when running
skills.
- :paramtype cognitive_services_account:
+ :vartype cognitive_services_account:
~azure.search.documents.indexes.models.CognitiveServicesAccount
- :keyword knowledge_store: Definition of additional projections to azure blob, table, or files,
- of enriched data.
- :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
- :keyword e_tag: The ETag of the skillset.
- :paramtype e_tag: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :ivar knowledge_store: Definition of additional projections to azure blob, table, or files, of
+ enriched data.
+ :vartype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
+ :ivar e_tag: The ETag of the skillset.
+ :vartype e_tag: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your skillset
definition when you want full assurance that no one, not even Microsoft, can decrypt your
skillset definition in Azure Cognitive Search. Once you have encrypted your skillset
@@ -5723,7 +7321,7 @@ class SearchIndexerSkillset(msrest.serialization.Model):
encryption key; Your skillset definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
_validation = {
@@ -5753,6 +7351,33 @@ def __init__(
encryption_key: Optional["SearchResourceEncryptionKey"] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the skillset.
+ :paramtype name: str
+ :keyword description: The description of the skillset.
+ :paramtype description: str
+ :keyword skills: Required. A list of skills in the skillset.
+ :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill]
+ :keyword cognitive_services_account: Details about cognitive services to be used when running
+ skills.
+ :paramtype cognitive_services_account:
+ ~azure.search.documents.indexes.models.CognitiveServicesAccount
+ :keyword knowledge_store: Definition of additional projections to azure blob, table, or files,
+ of enriched data.
+ :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore
+ :keyword e_tag: The ETag of the skillset.
+ :paramtype e_tag: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your skillset
+ definition when you want full assurance that no one, not even Microsoft, can decrypt your
+ skillset definition in Azure Cognitive Search. Once you have encrypted your skillset
+ definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
+ this property to null. You can change this property as needed if you want to rotate your
+ encryption key; Your skillset definition will be unaffected. Encryption with customer-managed
+ keys is not available for free search services, and is only available for paid services created
+ on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ """
super(SearchIndexerSkillset, self).__init__(**kwargs)
self.name = name
self.description = description
@@ -5800,6 +7425,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerStatus, self).__init__(**kwargs)
self.status = None
self.last_result = None
@@ -5850,6 +7477,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SearchIndexerWarning, self).__init__(**kwargs)
self.key = None
self.message = None
@@ -5863,25 +7492,25 @@ class SearchResourceEncryptionKey(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your
+ :ivar key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data
+ at rest.
+ :vartype key_name: str
+ :ivar key_version: Required. The version of your Azure Key Vault key to be used to encrypt your
data at rest.
- :paramtype key_name: str
- :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt
- your data at rest.
- :paramtype key_version: str
- :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name,
- that contains the key to be used to encrypt your data at rest. An example URI might be
+ :vartype key_version: str
+ :ivar vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that
+ contains the key to be used to encrypt your data at rest. An example URI might be
https://my-keyvault-name.vault.azure.net.
- :paramtype vault_uri: str
- :keyword access_credentials: Optional Azure Active Directory credentials used for accessing
- your Azure Key Vault. Not required if using managed identity instead.
- :paramtype access_credentials:
+ :vartype vault_uri: str
+ :ivar access_credentials: Optional Azure Active Directory credentials used for accessing your
+ Azure Key Vault. Not required if using managed identity instead.
+ :vartype access_credentials:
~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials
- :keyword identity: An explicit managed identity to use for this encryption key. If not
- specified and the access credentials property is null, the system-assigned managed identity is
- used. On update to the resource, if the explicit identity is unspecified, it remains unchanged.
- If "none" is specified, the value of this property is cleared.
- :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ :ivar identity: An explicit managed identity to use for this encryption key. If not specified
+ and the access credentials property is null, the system-assigned managed identity is used. On
+ update to the resource, if the explicit identity is unspecified, it remains unchanged. If
+ "none" is specified, the value of this property is cleared.
+ :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
"""
_validation = {
@@ -5908,6 +7537,27 @@ def __init__(
identity: Optional["SearchIndexerDataIdentity"] = None,
**kwargs
):
+ """
+ :keyword key_name: Required. The name of your Azure Key Vault key to be used to encrypt your
+ data at rest.
+ :paramtype key_name: str
+ :keyword key_version: Required. The version of your Azure Key Vault key to be used to encrypt
+ your data at rest.
+ :paramtype key_version: str
+ :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name,
+ that contains the key to be used to encrypt your data at rest. An example URI might be
+ https://my-keyvault-name.vault.azure.net.
+ :paramtype vault_uri: str
+ :keyword access_credentials: Optional Azure Active Directory credentials used for accessing
+ your Azure Key Vault. Not required if using managed identity instead.
+ :paramtype access_credentials:
+ ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials
+ :keyword identity: An explicit managed identity to use for this encryption key. If not
+ specified and the access credentials property is null, the system-assigned managed identity is
+ used. On update to the resource, if the explicit identity is unspecified, it remains unchanged.
+ If "none" is specified, the value of this property is cleared.
+ :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
+ """
super(SearchResourceEncryptionKey, self).__init__(**kwargs)
self.key_name = key_name
self.key_version = key_version
@@ -5921,30 +7571,29 @@ class SentimentSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT",
"ru", "es", "sv", "tr".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.SentimentSkillLanguage
"""
@@ -5975,6 +7624,30 @@ def __init__(
default_language_code: Optional[Union[str, "SentimentSkillLanguage"]] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT",
+ "ru", "es", "sv", "tr".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.SentimentSkillLanguage
+ """
super(SentimentSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' # type: str
self.default_language_code = default_language_code
@@ -5985,36 +7658,35 @@ class SentimentSkillV3(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
- :paramtype default_language_code: str
- :keyword include_opinion_mining: If set to true, the skill output will include information from
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
+ :vartype default_language_code: str
+ :ivar include_opinion_mining: If set to true, the skill output will include information from
Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated
assessment (adjective) in the text. Default is false.
- :paramtype include_opinion_mining: bool
- :keyword model_version: The version of the model to use when calling the Text Analytics
- service. It will default to the latest available when not specified. We recommend you do not
- specify this value unless absolutely necessary.
- :paramtype model_version: str
+ :vartype include_opinion_mining: bool
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It will default to the latest available when not specified. We recommend you do not specify
+ this value unless absolutely necessary.
+ :vartype model_version: str
"""
_validation = {
@@ -6048,6 +7720,35 @@ def __init__(
model_version: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :paramtype default_language_code: str
+ :keyword include_opinion_mining: If set to true, the skill output will include information from
+ Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated
+ assessment (adjective) in the text. Default is false.
+ :paramtype include_opinion_mining: bool
+ :keyword model_version: The version of the model to use when calling the Text Analytics
+ service. It will default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :paramtype model_version: str
+ """
super(SentimentSkillV3, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.V3.SentimentSkill' # type: str
self.default_language_code = default_language_code
@@ -6060,21 +7761,20 @@ class ServiceCounters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword document_counter: Required. Total number of documents across all indexes in the
- service.
- :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword index_counter: Required. Total number of indexes.
- :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword indexer_counter: Required. Total number of indexers.
- :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword data_source_counter: Required. Total number of data sources.
- :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword storage_size_counter: Required. Total size of used storage in bytes.
- :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword synonym_map_counter: Required. Total number of synonym maps.
- :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
- :keyword skillset_counter: Total number of skillsets.
- :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar document_counter: Required. Total number of documents across all indexes in the service.
+ :vartype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar index_counter: Required. Total number of indexes.
+ :vartype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar indexer_counter: Required. Total number of indexers.
+ :vartype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar data_source_counter: Required. Total number of data sources.
+ :vartype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar storage_size_counter: Required. Total size of used storage in bytes.
+ :vartype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar synonym_map_counter: Required. Total number of synonym maps.
+ :vartype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :ivar skillset_counter: Total number of skillsets.
+ :vartype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
"""
_validation = {
@@ -6108,6 +7808,23 @@ def __init__(
skillset_counter: Optional["ResourceCounter"] = None,
**kwargs
):
+ """
+ :keyword document_counter: Required. Total number of documents across all indexes in the
+ service.
+ :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword index_counter: Required. Total number of indexes.
+ :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword indexer_counter: Required. Total number of indexers.
+ :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword data_source_counter: Required. Total number of data sources.
+ :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword storage_size_counter: Required. Total size of used storage in bytes.
+ :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword synonym_map_counter: Required. Total number of synonym maps.
+ :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ :keyword skillset_counter: Total number of skillsets.
+ :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter
+ """
super(ServiceCounters, self).__init__(**kwargs)
self.document_counter = document_counter
self.index_counter = index_counter
@@ -6121,17 +7838,17 @@ def __init__(
class ServiceLimits(msrest.serialization.Model):
"""Represents various service level limits.
- :keyword max_fields_per_index: The maximum allowed fields per index.
- :paramtype max_fields_per_index: int
- :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in
- an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
- :paramtype max_field_nesting_depth_per_index: int
- :keyword max_complex_collection_fields_per_index: The maximum number of fields of type
+ :ivar max_fields_per_index: The maximum allowed fields per index.
+ :vartype max_fields_per_index: int
+ :ivar max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an
+ index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
+ :vartype max_field_nesting_depth_per_index: int
+ :ivar max_complex_collection_fields_per_index: The maximum number of fields of type
Collection(Edm.ComplexType) allowed in an index.
- :paramtype max_complex_collection_fields_per_index: int
- :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in
- complex collections allowed per document.
- :paramtype max_complex_objects_in_collections_per_document: int
+ :vartype max_complex_collection_fields_per_index: int
+ :ivar max_complex_objects_in_collections_per_document: The maximum number of objects in complex
+ collections allowed per document.
+ :vartype max_complex_objects_in_collections_per_document: int
"""
_attribute_map = {
@@ -6150,6 +7867,19 @@ def __init__(
max_complex_objects_in_collections_per_document: Optional[int] = None,
**kwargs
):
+ """
+ :keyword max_fields_per_index: The maximum allowed fields per index.
+ :paramtype max_fields_per_index: int
+ :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in
+ an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.
+ :paramtype max_field_nesting_depth_per_index: int
+ :keyword max_complex_collection_fields_per_index: The maximum number of fields of type
+ Collection(Edm.ComplexType) allowed in an index.
+ :paramtype max_complex_collection_fields_per_index: int
+ :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in
+ complex collections allowed per document.
+ :paramtype max_complex_objects_in_collections_per_document: int
+ """
super(ServiceLimits, self).__init__(**kwargs)
self.max_fields_per_index = max_fields_per_index
self.max_field_nesting_depth_per_index = max_field_nesting_depth_per_index
@@ -6162,10 +7892,10 @@ class ServiceStatistics(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword counters: Required. Service level resource counters.
- :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters
- :keyword limits: Required. Service level general limits.
- :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits
+ :ivar counters: Required. Service level resource counters.
+ :vartype counters: ~azure.search.documents.indexes.models.ServiceCounters
+ :ivar limits: Required. Service level general limits.
+ :vartype limits: ~azure.search.documents.indexes.models.ServiceLimits
"""
_validation = {
@@ -6185,6 +7915,12 @@ def __init__(
limits: "ServiceLimits",
**kwargs
):
+ """
+ :keyword counters: Required. Service level resource counters.
+ :paramtype counters: ~azure.search.documents.indexes.models.ServiceCounters
+ :keyword limits: Required. Service level general limits.
+ :paramtype limits: ~azure.search.documents.indexes.models.ServiceLimits
+ """
super(ServiceStatistics, self).__init__(**kwargs)
self.counters = counters
self.limits = limits
@@ -6195,26 +7931,25 @@ class ShaperSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
"""
_validation = {
@@ -6242,6 +7977,25 @@ def __init__(
context: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ """
super(ShaperSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' # type: str
@@ -6251,31 +8005,31 @@ class ShingleTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2.
- :paramtype max_shingle_size: int
- :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be
- less than the value of maxShingleSize.
- :paramtype min_shingle_size: int
- :keyword output_unigrams: A value indicating whether the output stream will contain the input
+ :vartype name: str
+ :ivar max_shingle_size: The maximum shingle size. Default and minimum value is 2.
+ :vartype max_shingle_size: int
+ :ivar min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less
+ than the value of maxShingleSize.
+ :vartype min_shingle_size: int
+ :ivar output_unigrams: A value indicating whether the output stream will contain the input
tokens (unigrams) as well as shingles. Default is true.
- :paramtype output_unigrams: bool
- :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for
- those times when no shingles are available. This property takes precedence when outputUnigrams
- is set to false. Default is false.
- :paramtype output_unigrams_if_no_shingles: bool
- :keyword token_separator: The string to use when joining adjacent tokens to form a shingle.
+ :vartype output_unigrams: bool
+ :ivar output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those
+ times when no shingles are available. This property takes precedence when outputUnigrams is set
+ to false. Default is false.
+ :vartype output_unigrams_if_no_shingles: bool
+ :ivar token_separator: The string to use when joining adjacent tokens to form a shingle.
Default is a single space (" ").
- :paramtype token_separator: str
- :keyword filter_token: The string to insert for each position at which there is no token.
- Default is an underscore ("_").
- :paramtype filter_token: str
+ :vartype token_separator: str
+ :ivar filter_token: The string to insert for each position at which there is no token. Default
+ is an underscore ("_").
+ :vartype filter_token: str
"""
_validation = {
@@ -6308,6 +8062,30 @@ def __init__(
filter_token: Optional[str] = "_",
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2.
+ :paramtype max_shingle_size: int
+ :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be
+ less than the value of maxShingleSize.
+ :paramtype min_shingle_size: int
+ :keyword output_unigrams: A value indicating whether the output stream will contain the input
+ tokens (unigrams) as well as shingles. Default is true.
+ :paramtype output_unigrams: bool
+ :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for
+ those times when no shingles are available. This property takes precedence when outputUnigrams
+ is set to false. Default is false.
+ :paramtype output_unigrams_if_no_shingles: bool
+ :keyword token_separator: The string to use when joining adjacent tokens to form a shingle.
+ Default is a single space (" ").
+ :paramtype token_separator: str
+ :keyword filter_token: The string to insert for each position at which there is no token.
+ Default is an underscore ("_").
+ :paramtype filter_token: str
+ """
super(ShingleTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' # type: str
self.max_shingle_size = max_shingle_size
@@ -6318,23 +8096,48 @@ def __init__(
self.filter_token = filter_token
+class SkillNames(msrest.serialization.Model):
+ """SkillNames.
+
+ :ivar skill_names: the names of skills to be reset.
+ :vartype skill_names: list[str]
+ """
+
+ _attribute_map = {
+ 'skill_names': {'key': 'skillNames', 'type': '[str]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ skill_names: Optional[List[str]] = None,
+ **kwargs
+ ):
+ """
+ :keyword skill_names: the names of skills to be reset.
+ :paramtype skill_names: list[str]
+ """
+ super(SkillNames, self).__init__(**kwargs)
+ self.skill_names = skill_names
+
+
class SnowballTokenFilter(TokenFilter):
"""A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene.
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword language: Required. The language to use. Possible values include: "armenian",
- "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2",
- "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian",
- "russian", "spanish", "swedish", "turkish".
- :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
+ :vartype name: str
+ :ivar language: Required. The language to use. Possible values include: "armenian", "basque",
+ "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian",
+ "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian",
+ "spanish", "swedish", "turkish".
+ :vartype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
"""
_validation = {
@@ -6356,6 +8159,17 @@ def __init__(
language: Union[str, "SnowballTokenFilterLanguage"],
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword language: Required. The language to use. Possible values include: "armenian",
+ "basque", "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2",
+ "hungarian", "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian",
+ "russian", "spanish", "swedish", "turkish".
+ :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage
+ """
super(SnowballTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' # type: str
self.language = language
@@ -6366,13 +8180,13 @@ class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data deletion detection
+ :ivar odata_type: Required. Identifies the concrete type of the data deletion detection
policy.Constant filled by server.
- :paramtype odata_type: str
- :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection.
- :paramtype soft_delete_column_name: str
- :keyword soft_delete_marker_value: The marker value that identifies an item as deleted.
- :paramtype soft_delete_marker_value: str
+ :vartype odata_type: str
+ :ivar soft_delete_column_name: The name of the column to use for soft-deletion detection.
+ :vartype soft_delete_column_name: str
+ :ivar soft_delete_marker_value: The marker value that identifies an item as deleted.
+ :vartype soft_delete_marker_value: str
"""
_validation = {
@@ -6392,6 +8206,12 @@ def __init__(
soft_delete_marker_value: Optional[str] = None,
**kwargs
):
+ """
+ :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection.
+ :paramtype soft_delete_column_name: str
+ :keyword soft_delete_marker_value: The marker value that identifies an item as deleted.
+ :paramtype soft_delete_marker_value: str
+ """
super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' # type: str
self.soft_delete_column_name = soft_delete_column_name
@@ -6403,35 +8223,34 @@ class SplitSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_language_code: A value indicating which language code to use. Default is en.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is en.
Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
- :paramtype default_language_code: str or
+ :vartype default_language_code: str or
~azure.search.documents.indexes.models.SplitSkillLanguage
- :keyword text_split_mode: A value indicating which split mode to perform. Possible values
- include: "pages", "sentences".
- :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
- :keyword maximum_page_length: The desired maximum page length. Default is 10000.
- :paramtype maximum_page_length: int
+ :ivar text_split_mode: A value indicating which split mode to perform. Possible values include:
+ "pages", "sentences".
+ :vartype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
+ :ivar maximum_page_length: The desired maximum page length. Default is 10000.
+ :vartype maximum_page_length: int
"""
_validation = {
@@ -6465,6 +8284,34 @@ def __init__(
maximum_page_length: Optional[int] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt".
+ :paramtype default_language_code: str or
+ ~azure.search.documents.indexes.models.SplitSkillLanguage
+ :keyword text_split_mode: A value indicating which split mode to perform. Possible values
+ include: "pages", "sentences".
+ :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode
+ :keyword maximum_page_length: The desired maximum page length. Default is 10000.
+ :paramtype maximum_page_length: int
+ """
super(SplitSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.SplitSkill' # type: str
self.default_language_code = default_language_code
@@ -6477,9 +8324,9 @@ class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the data change detection
+ :ivar odata_type: Required. Identifies the concrete type of the data change detection
policy.Constant filled by server.
- :paramtype odata_type: str
+ :vartype odata_type: str
"""
_validation = {
@@ -6494,6 +8341,8 @@ def __init__(
self,
**kwargs
):
+ """
+ """
super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' # type: str
@@ -6503,16 +8352,16 @@ class StemmerOverrideTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for
+ :vartype name: str
+ :ivar rules: Required. A list of stemming rules in the following format: "word => stem", for
example: "ran => run".
- :paramtype rules: list[str]
+ :vartype rules: list[str]
"""
_validation = {
@@ -6534,6 +8383,15 @@ def __init__(
rules: List[str],
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword rules: Required. A list of stemming rules in the following format: "word => stem", for
+ example: "ran => run".
+ :paramtype rules: list[str]
+ """
super(StemmerOverrideTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' # type: str
self.rules = rules
@@ -6544,23 +8402,23 @@ class StemmerTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword language: Required. The language to use. Possible values include: "arabic",
- "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch",
- "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2",
- "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
+ :vartype name: str
+ :ivar language: Required. The language to use. Possible values include: "arabic", "armenian",
+ "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp",
+ "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins",
+ "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
"minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi",
"hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani",
"latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk",
"portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian",
"lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish".
- :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
+ :vartype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
"""
_validation = {
@@ -6582,6 +8440,22 @@ def __init__(
language: Union[str, "StemmerTokenFilterLanguage"],
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword language: Required. The language to use. Possible values include: "arabic",
+ "armenian", "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch",
+ "dutchKp", "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2",
+ "lovins", "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician",
+ "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi",
+ "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani",
+ "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk",
+ "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian",
+ "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish".
+ :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage
+ """
super(StemmerTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' # type: str
self.language = language
@@ -6592,15 +8466,15 @@ class StopAnalyzer(LexicalAnalyzer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword stopwords: A list of stopwords.
- :paramtype stopwords: list[str]
+ :vartype odata_type: str
+ :ivar name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar stopwords: A list of stopwords.
+ :vartype stopwords: list[str]
"""
_validation = {
@@ -6621,6 +8495,14 @@ def __init__(
stopwords: Optional[List[str]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the analyzer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword stopwords: A list of stopwords.
+ :paramtype stopwords: list[str]
+ """
super(StopAnalyzer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' # type: str
self.stopwords = stopwords
@@ -6631,29 +8513,29 @@ class StopwordsTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot
+ :vartype name: str
+ :ivar stopwords: The list of stopwords. This property and the stopwords list property cannot
both be set.
- :paramtype stopwords: list[str]
- :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords
+ :vartype stopwords: list[str]
+ :ivar stopwords_list: A predefined list of stopwords to use. This property and the stopwords
property cannot both be set. Default is English. Possible values include: "arabic", "armenian",
"basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english",
"finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian",
"irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian",
"sorani", "spanish", "swedish", "thai", "turkish".
- :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
- :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
- converted to lower case first. Default is false.
- :paramtype ignore_case: bool
- :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term
- if it's a stop word. Default is true.
- :paramtype remove_trailing_stop_words: bool
+ :vartype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
+ :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted
+ to lower case first. Default is false.
+ :vartype ignore_case: bool
+ :ivar remove_trailing_stop_words: A value indicating whether to ignore the last search term if
+ it's a stop word. Default is true.
+ :vartype remove_trailing_stop_words: bool
"""
_validation = {
@@ -6680,6 +8562,28 @@ def __init__(
remove_trailing_stop_words: Optional[bool] = True,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword stopwords: The list of stopwords. This property and the stopwords list property cannot
+ both be set.
+ :paramtype stopwords: list[str]
+ :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords
+ property cannot both be set. Default is English. Possible values include: "arabic", "armenian",
+ "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english",
+ "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian",
+ "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian",
+ "sorani", "spanish", "swedish", "thai", "turkish".
+ :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList
+ :keyword ignore_case: A value indicating whether to ignore case. If true, all words are
+ converted to lower case first. Default is false.
+ :paramtype ignore_case: bool
+ :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term
+ if it's a stop word. Default is true.
+ :paramtype remove_trailing_stop_words: bool
+ """
super(StopwordsTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' # type: str
self.stopwords = stopwords
@@ -6695,14 +8599,14 @@ class Suggester(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the suggester.
- :paramtype name: str
+ :ivar name: Required. The name of the suggester.
+ :vartype name: str
:ivar search_mode: A value indicating the capabilities of the suggester. Has constant value:
"analyzingInfixMatching".
:vartype search_mode: str
- :keyword source_fields: Required. The list of field names to which the suggester applies. Each
+ :ivar source_fields: Required. The list of field names to which the suggester applies. Each
field must be searchable.
- :paramtype source_fields: list[str]
+ :vartype source_fields: list[str]
"""
_validation = {
@@ -6726,6 +8630,13 @@ def __init__(
source_fields: List[str],
**kwargs
):
+ """
+ :keyword name: Required. The name of the suggester.
+ :paramtype name: str
+ :keyword source_fields: Required. The list of field names to which the suggester applies. Each
+ field must be searchable.
+ :paramtype source_fields: list[str]
+ """
super(Suggester, self).__init__(**kwargs)
self.name = name
self.source_fields = source_fields
@@ -6738,15 +8649,15 @@ class SynonymMap(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword name: Required. The name of the synonym map.
- :paramtype name: str
+ :ivar name: Required. The name of the synonym map.
+ :vartype name: str
:ivar format: The format of the synonym map. Only the 'solr' format is currently supported. Has
constant value: "solr".
:vartype format: str
- :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The
+ :ivar synonyms: Required. A series of synonym rules in the specified synonym map format. The
rules must be separated by newlines.
- :paramtype synonyms: str
- :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ :vartype synonyms: str
+ :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your data when you
want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
@@ -6754,9 +8665,9 @@ class SynonymMap(msrest.serialization.Model):
needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
customer-managed keys is not available for free search services, and is only available for paid
services created on or after January 1, 2019.
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
- :keyword e_tag: The ETag of the synonym map.
- :paramtype e_tag: str
+ :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :ivar e_tag: The ETag of the synonym map.
+ :vartype e_tag: str
"""
_validation = {
@@ -6784,6 +8695,24 @@ def __init__(
e_tag: Optional[str] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the synonym map.
+ :paramtype name: str
+ :keyword synonyms: Required. A series of synonym rules in the specified synonym map format. The
+ rules must be separated by newlines.
+ :paramtype synonyms: str
+ :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
+ This key is used to provide an additional level of encryption-at-rest for your data when you
+ want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
+ Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
+ Search will ignore attempts to set this property to null. You can change this property as
+ needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
+ customer-managed keys is not available for free search services, and is only available for paid
+ services created on or after January 1, 2019.
+ :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
+ :keyword e_tag: The ETag of the synonym map.
+ :paramtype e_tag: str
+ """
super(SynonymMap, self).__init__(**kwargs)
self.name = name
self.synonyms = synonyms
@@ -6796,30 +8725,30 @@ class SynonymTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible,
+ :vartype name: str
+ :ivar synonyms: Required. A list of synonyms in following one of two formats: 1. incredible,
unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced
with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma
separated list of equivalent words. Set the expand option to change how this list is
interpreted.
- :paramtype synonyms: list[str]
- :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is
+ :vartype synonyms: list[str]
+ :ivar ignore_case: A value indicating whether to case-fold input for matching. Default is
false.
- :paramtype ignore_case: bool
- :keyword expand: A value indicating whether all words in the list of synonyms (if => notation
- is not used) will map to one another. If true, all words in the list of synonyms (if =>
- notation is not used) will map to one another. The following list: incredible, unbelievable,
- fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
+ :vartype ignore_case: bool
+ :ivar expand: A value indicating whether all words in the list of synonyms (if => notation is
+ not used) will map to one another. If true, all words in the list of synonyms (if => notation
+ is not used) will map to one another. The following list: incredible, unbelievable, fabulous,
+ amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable,
fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing =>
incredible. Default is true.
- :paramtype expand: bool
+ :vartype expand: bool
"""
_validation = {
@@ -6845,6 +8774,29 @@ def __init__(
expand: Optional[bool] = True,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword synonyms: Required. A list of synonyms in following one of two formats: 1. incredible,
+ unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced
+ with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma
+ separated list of equivalent words. Set the expand option to change how this list is
+ interpreted.
+ :paramtype synonyms: list[str]
+ :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is
+ false.
+ :paramtype ignore_case: bool
+ :keyword expand: A value indicating whether all words in the list of synonyms (if => notation
+ is not used) will map to one another. If true, all words in the list of synonyms (if =>
+ notation is not used) will map to one another. The following list: incredible, unbelievable,
+ fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible,
+ unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable,
+ fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing =>
+ incredible. Default is true.
+ :paramtype expand: bool
+ """
super(SynonymTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' # type: str
self.synonyms = synonyms
@@ -6857,21 +8809,21 @@ class TagScoringFunction(ScoringFunction):
All required parameters must be populated in order to send to Azure.
- :keyword type: Required. Indicates the type of function to use. Valid values include magnitude,
+ :ivar type: Required. Indicates the type of function to use. Valid values include magnitude,
freshness, distance, and tag. The function type must be lower case.Constant filled by server.
- :paramtype type: str
- :keyword field_name: Required. The name of the field used as input to the scoring function.
- :paramtype field_name: str
- :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
- to 1.0.
- :paramtype boost: float
- :keyword interpolation: A value indicating how boosting will be interpolated across document
+ :vartype type: str
+ :ivar field_name: Required. The name of the field used as input to the scoring function.
+ :vartype field_name: str
+ :ivar boost: Required. A multiplier for the raw score. Must be a positive number not equal to
+ 1.0.
+ :vartype boost: float
+ :ivar interpolation: A value indicating how boosting will be interpolated across document
scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
"logarithmic".
- :paramtype interpolation: str or
+ :vartype interpolation: str or
~azure.search.documents.indexes.models.ScoringFunctionInterpolation
- :keyword parameters: Required. Parameter values for the tag scoring function.
- :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
+ :ivar parameters: Required. Parameter values for the tag scoring function.
+ :vartype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
"""
_validation = {
@@ -6898,6 +8850,20 @@ def __init__(
interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None,
**kwargs
):
+ """
+ :keyword field_name: Required. The name of the field used as input to the scoring function.
+ :paramtype field_name: str
+ :keyword boost: Required. A multiplier for the raw score. Must be a positive number not equal
+ to 1.0.
+ :paramtype boost: float
+ :keyword interpolation: A value indicating how boosting will be interpolated across document
+ scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic",
+ "logarithmic".
+ :paramtype interpolation: str or
+ ~azure.search.documents.indexes.models.ScoringFunctionInterpolation
+ :keyword parameters: Required. Parameter values for the tag scoring function.
+ :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters
+ """
super(TagScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs)
self.type = 'tag' # type: str
self.parameters = parameters
@@ -6908,9 +8874,9 @@ class TagScoringParameters(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword tags_parameter: Required. The name of the parameter passed in search queries to
- specify the list of tags to compare against the target field.
- :paramtype tags_parameter: str
+ :ivar tags_parameter: Required. The name of the parameter passed in search queries to specify
+ the list of tags to compare against the target field.
+ :vartype tags_parameter: str
"""
_validation = {
@@ -6927,6 +8893,11 @@ def __init__(
tags_parameter: str,
**kwargs
):
+ """
+ :keyword tags_parameter: Required. The name of the parameter passed in search queries to
+ specify the list of tags to compare against the target field.
+ :paramtype tags_parameter: str
+ """
super(TagScoringParameters, self).__init__(**kwargs)
self.tags_parameter = tags_parameter
@@ -6936,45 +8907,44 @@ class TextTranslationSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword default_to_language_code: Required. The language code to translate documents into for
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar default_to_language_code: Required. The language code to translate documents into for
documents that don't specify the to language explicitly. Possible values include: "af", "ar",
"bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
"fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
"tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
"pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
"ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
- :paramtype default_to_language_code: str or
+ :vartype default_to_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
- :keyword default_from_language_code: The language code to translate documents from for
- documents that don't specify the from language explicitly. Possible values include: "af", "ar",
- "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
- "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
- "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
- "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
- "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
- :paramtype default_from_language_code: str or
+ :ivar default_from_language_code: The language code to translate documents from for documents
+ that don't specify the from language explicitly. Possible values include: "af", "ar", "bn",
+ "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil",
+ "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh",
+ "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br",
+ "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta",
+ "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
+ :vartype default_from_language_code: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
- :keyword suggested_from: The language code to translate documents from when neither the
+ :ivar suggested_from: The language code to translate documents from when neither the
fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the
automatic language detection is unsuccessful. Default is en. Possible values include: "af",
"ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et",
@@ -6983,7 +8953,7 @@ class TextTranslationSkill(SearchIndexerSkill):
"pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv",
"ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml",
"pa".
- :paramtype suggested_from: str or
+ :vartype suggested_from: str or
~azure.search.documents.indexes.models.TextTranslationSkillLanguage
"""
@@ -7019,6 +8989,54 @@ def __init__(
suggested_from: Optional[Union[str, "TextTranslationSkillLanguage"]] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword default_to_language_code: Required. The language code to translate documents into for
+ documents that don't specify the to language explicitly. Possible values include: "af", "ar",
+ "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
+ "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
+ "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
+ "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
+ "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
+ :paramtype default_to_language_code: str or
+ ~azure.search.documents.indexes.models.TextTranslationSkillLanguage
+ :keyword default_from_language_code: The language code to translate documents from for
+ documents that don't specify the from language explicitly. Possible values include: "af", "ar",
+ "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj",
+ "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw",
+ "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt",
+ "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty",
+ "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa".
+ :paramtype default_from_language_code: str or
+ ~azure.search.documents.indexes.models.TextTranslationSkillLanguage
+ :keyword suggested_from: The language code to translate documents from when neither the
+ fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the
+ automatic language detection is unsuccessful. Default is en. Possible values include: "af",
+ "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et",
+ "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja",
+ "sw", "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl",
+ "pt", "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv",
+ "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml",
+ "pa".
+ :paramtype suggested_from: str or
+ ~azure.search.documents.indexes.models.TextTranslationSkillLanguage
+ """
super(TextTranslationSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' # type: str
self.default_to_language_code = default_to_language_code
@@ -7031,9 +9049,9 @@ class TextWeights(msrest.serialization.Model):
All required parameters must be populated in order to send to Azure.
- :keyword weights: Required. The dictionary of per-field weights to boost document scoring. The
+ :ivar weights: Required. The dictionary of per-field weights to boost document scoring. The
keys are field names and the values are the weights for each field.
- :paramtype weights: dict[str, float]
+ :vartype weights: dict[str, float]
"""
_validation = {
@@ -7050,6 +9068,11 @@ def __init__(
weights: Dict[str, float],
**kwargs
):
+ """
+ :keyword weights: Required. The dictionary of per-field weights to boost document scoring. The
+ keys are field names and the values are the weights for each field.
+ :paramtype weights: dict[str, float]
+ """
super(TextWeights, self).__init__(**kwargs)
self.weights = weights
@@ -7059,15 +9082,15 @@ class TruncateTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword length: The length at which terms will be truncated. Default and maximum is 300.
- :paramtype length: int
+ :vartype name: str
+ :ivar length: The length at which terms will be truncated. Default and maximum is 300.
+ :vartype length: int
"""
_validation = {
@@ -7089,6 +9112,14 @@ def __init__(
length: Optional[int] = 300,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword length: The length at which terms will be truncated. Default and maximum is 300.
+ :paramtype length: int
+ """
super(TruncateTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter' # type: str
self.length = length
@@ -7099,16 +9130,16 @@ class UaxUrlEmailTokenizer(LexicalTokenizer):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
- spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
- limited to 128 characters.
- :paramtype name: str
- :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ :vartype odata_type: str
+ :ivar name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :vartype name: str
+ :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the
maximum length are split. The maximum token length that can be used is 300 characters.
- :paramtype max_token_length: int
+ :vartype max_token_length: int
"""
_validation = {
@@ -7130,6 +9161,15 @@ def __init__(
max_token_length: Optional[int] = 255,
**kwargs
):
+ """
+ :keyword name: Required. The name of the tokenizer. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :paramtype max_token_length: int
+ """
super(UaxUrlEmailTokenizer, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' # type: str
self.max_token_length = max_token_length
@@ -7140,16 +9180,16 @@ class UniqueTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword only_on_same_position: A value indicating whether to remove duplicates only at the
- same position. Default is false.
- :paramtype only_on_same_position: bool
+ :vartype name: str
+ :ivar only_on_same_position: A value indicating whether to remove duplicates only at the same
+ position. Default is false.
+ :vartype only_on_same_position: bool
"""
_validation = {
@@ -7170,6 +9210,15 @@ def __init__(
only_on_same_position: Optional[bool] = False,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword only_on_same_position: A value indicating whether to remove duplicates only at the
+ same position. Default is false.
+ :paramtype only_on_same_position: bool
+ """
super(UniqueTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter' # type: str
self.only_on_same_position = only_on_same_position
@@ -7180,39 +9229,38 @@ class WebApiSkill(SearchIndexerSkill):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the skill.Constant filled by
+ :ivar odata_type: Required. Identifies the concrete type of the skill.Constant filled by
server.
- :paramtype odata_type: str
- :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ :vartype odata_type: str
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
with no name defined will be given a default name of its 1-based index in the skills array,
prefixed with the character '#'.
- :paramtype name: str
- :keyword description: The description of the skill which describes the inputs, outputs, and
- usage of the skill.
- :paramtype description: str
- :keyword context: Represents the level at which operations take place, such as the document
- root or document content (for example, /document or /document/content). The default is
- /document.
- :paramtype context: str
- :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
- the output of an upstream skill.
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
- :keyword outputs: Required. The output of a skill is either a field in a search index, or a
- value that can be consumed as an input by another skill.
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
- :keyword uri: Required. The url for the Web API.
- :paramtype uri: str
- :keyword http_headers: The headers required to make the http request.
- :paramtype http_headers: dict[str, str]
- :keyword http_method: The method for the http request.
- :paramtype http_method: str
- :keyword timeout: The desired timeout for the request. Default is 30 seconds.
- :paramtype timeout: ~datetime.timedelta
- :keyword batch_size: The desired batch size which indicates number of documents.
- :paramtype batch_size: int
- :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the
- Web API.
- :paramtype degree_of_parallelism: int
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default is /document.
+ :vartype context: str
+ :ivar inputs: Required. Inputs of the skills could be a column in the source data set, or the
+ output of an upstream skill.
+ :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :ivar outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :ivar uri: Required. The url for the Web API.
+ :vartype uri: str
+ :ivar http_headers: The headers required to make the http request.
+ :vartype http_headers: dict[str, str]
+ :ivar http_method: The method for the http request.
+ :vartype http_method: str
+ :ivar timeout: The desired timeout for the request. Default is 30 seconds.
+ :vartype timeout: ~datetime.timedelta
+ :ivar batch_size: The desired batch size which indicates number of documents.
+ :vartype batch_size: int
+ :ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web
+ API.
+ :vartype degree_of_parallelism: int
"""
_validation = {
@@ -7253,6 +9301,38 @@ def __init__(
degree_of_parallelism: Optional[int] = None,
**kwargs
):
+ """
+ :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the skills array,
+ prefixed with the character '#'.
+ :paramtype name: str
+ :keyword description: The description of the skill which describes the inputs, outputs, and
+ usage of the skill.
+ :paramtype description: str
+ :keyword context: Represents the level at which operations take place, such as the document
+ root or document content (for example, /document or /document/content). The default is
+ /document.
+ :paramtype context: str
+ :keyword inputs: Required. Inputs of the skills could be a column in the source data set, or
+ the output of an upstream skill.
+ :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry]
+ :keyword outputs: Required. The output of a skill is either a field in a search index, or a
+ value that can be consumed as an input by another skill.
+ :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry]
+ :keyword uri: Required. The url for the Web API.
+ :paramtype uri: str
+ :keyword http_headers: The headers required to make the http request.
+ :paramtype http_headers: dict[str, str]
+ :keyword http_method: The method for the http request.
+ :paramtype http_method: str
+ :keyword timeout: The desired timeout for the request. Default is 30 seconds.
+ :paramtype timeout: ~datetime.timedelta
+ :keyword batch_size: The desired batch size which indicates number of documents.
+ :paramtype batch_size: int
+ :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the
+ Web API.
+ :paramtype degree_of_parallelism: int
+ """
super(WebApiSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' # type: str
self.uri = uri
@@ -7268,44 +9348,43 @@ class WordDelimiterTokenFilter(TokenFilter):
All required parameters must be populated in order to send to Azure.
- :keyword odata_type: Required. Identifies the concrete type of the token filter.Constant filled
- by server.
- :paramtype odata_type: str
- :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ :ivar odata_type: Required. Identifies the concrete type of the token filter.Constant filled by
+ server.
+ :vartype odata_type: str
+ :ivar name: Required. The name of the token filter. It must only contain letters, digits,
spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
limited to 128 characters.
- :paramtype name: str
- :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes
+ :vartype name: str
+ :ivar generate_word_parts: A value indicating whether to generate part words. If set, causes
parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is
true.
- :paramtype generate_word_parts: bool
- :keyword generate_number_parts: A value indicating whether to generate number subwords. Default
- is true.
- :paramtype generate_number_parts: bool
- :keyword catenate_words: A value indicating whether maximum runs of word parts will be
- catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default
- is false.
- :paramtype catenate_words: bool
- :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be
+ :vartype generate_word_parts: bool
+ :ivar generate_number_parts: A value indicating whether to generate number subwords. Default is
+ true.
+ :vartype generate_number_parts: bool
+ :ivar catenate_words: A value indicating whether maximum runs of word parts will be catenated.
+ For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false.
+ :vartype catenate_words: bool
+ :ivar catenate_numbers: A value indicating whether maximum runs of number parts will be
catenated. For example, if this is set to true, "1-2" becomes "12". Default is false.
- :paramtype catenate_numbers: bool
- :keyword catenate_all: A value indicating whether all subword parts will be catenated. For
+ :vartype catenate_numbers: bool
+ :ivar catenate_all: A value indicating whether all subword parts will be catenated. For
example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false.
- :paramtype catenate_all: bool
- :keyword split_on_case_change: A value indicating whether to split words on caseChange. For
+ :vartype catenate_all: bool
+ :ivar split_on_case_change: A value indicating whether to split words on caseChange. For
example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true.
- :paramtype split_on_case_change: bool
- :keyword preserve_original: A value indicating whether original words will be preserved and
- added to the subword list. Default is false.
- :paramtype preserve_original: bool
- :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if
- this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
- :paramtype split_on_numerics: bool
- :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each
+ :vartype split_on_case_change: bool
+ :ivar preserve_original: A value indicating whether original words will be preserved and added
+ to the subword list. Default is false.
+ :vartype preserve_original: bool
+ :ivar split_on_numerics: A value indicating whether to split on numbers. For example, if this
+ is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
+ :vartype split_on_numerics: bool
+ :ivar stem_english_possessive: A value indicating whether to remove trailing "'s" for each
subword. Default is true.
- :paramtype stem_english_possessive: bool
- :keyword protected_words: A list of tokens to protect from being delimited.
- :paramtype protected_words: list[str]
+ :vartype stem_english_possessive: bool
+ :ivar protected_words: A list of tokens to protect from being delimited.
+ :vartype protected_words: list[str]
"""
_validation = {
@@ -7344,6 +9423,43 @@ def __init__(
protected_words: Optional[List[str]] = None,
**kwargs
):
+ """
+ :keyword name: Required. The name of the token filter. It must only contain letters, digits,
+ spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+ limited to 128 characters.
+ :paramtype name: str
+ :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes
+ parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is
+ true.
+ :paramtype generate_word_parts: bool
+ :keyword generate_number_parts: A value indicating whether to generate number subwords. Default
+ is true.
+ :paramtype generate_number_parts: bool
+ :keyword catenate_words: A value indicating whether maximum runs of word parts will be
+ catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default
+ is false.
+ :paramtype catenate_words: bool
+ :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be
+ catenated. For example, if this is set to true, "1-2" becomes "12". Default is false.
+ :paramtype catenate_numbers: bool
+ :keyword catenate_all: A value indicating whether all subword parts will be catenated. For
+ example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false.
+ :paramtype catenate_all: bool
+ :keyword split_on_case_change: A value indicating whether to split words on caseChange. For
+ example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true.
+ :paramtype split_on_case_change: bool
+ :keyword preserve_original: A value indicating whether original words will be preserved and
+ added to the subword list. Default is false.
+ :paramtype preserve_original: bool
+ :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if
+ this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
+ :paramtype split_on_numerics: bool
+ :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each
+ subword. Default is true.
+ :paramtype stem_english_possessive: bool
+ :keyword protected_words: A list of tokens to protect from being delimited.
+ :paramtype protected_words: list[str]
+ """
super(WordDelimiterTokenFilter, self).__init__(name=name, **kwargs)
self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' # type: str
self.generate_word_parts = generate_word_parts
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py
index 85a4b01663ec..f0fca120b0d5 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py
@@ -12,12 +12,12 @@
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
-from azure.core.pipeline.transport._base import _format_url_section
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
+from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -38,7 +38,7 @@ def build_create_or_update_request(
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
if_match = kwargs.pop('if_match', None) # type: Optional[str]
if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str]
- ignore_reset_requirements = kwargs.pop('ignore_reset_requirements', None) # type: Optional[bool]
+ skip_indexer_reset_requirement_for_cache = kwargs.pop('skip_indexer_reset_requirement_for_cache', None) # type: Optional[bool]
prefer = "return=representation"
api_version = "2021-04-30-Preview"
@@ -54,8 +54,8 @@ def build_create_or_update_request(
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
- if ignore_reset_requirements is not None:
- query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("ignore_reset_requirements", ignore_reset_requirements, 'bool')
+ if skip_indexer_reset_requirement_for_cache is not None:
+ query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
@@ -252,7 +252,7 @@ def create_or_update(
data_source, # type: "_models.SearchIndexerDataSource"
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
- ignore_reset_requirements=None, # type: Optional[bool]
+ skip_indexer_reset_requirement_for_cache=None, # type: Optional[bool]
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
@@ -269,8 +269,8 @@ def create_or_update(
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
- :param ignore_reset_requirements: Ignores cache reset requirements.
- :type ignore_reset_requirements: bool
+ :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :type skip_indexer_reset_requirement_for_cache: bool
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -297,21 +297,22 @@ def create_or_update(
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
- ignore_reset_requirements=ignore_reset_requirements,
+ skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -371,18 +372,19 @@ def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -424,18 +426,19 @@ def get(
data_source_name=data_source_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
@@ -483,18 +486,19 @@ def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListDataSourcesResult', pipeline_response)
@@ -544,18 +548,19 @@ def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py
index ae85d03e1d14..30a3129eec29 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py
@@ -12,12 +12,12 @@
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
-from azure.core.pipeline.transport._base import _format_url_section
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
+from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -152,8 +152,8 @@ def build_create_or_update_request(
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
if_match = kwargs.pop('if_match', None) # type: Optional[str]
if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str]
+ skip_indexer_reset_requirement_for_cache = kwargs.pop('skip_indexer_reset_requirement_for_cache', None) # type: Optional[bool]
disable_cache_reprocessing_change_detection = kwargs.pop('disable_cache_reprocessing_change_detection', None) # type: Optional[bool]
- ignore_reset_requirements = kwargs.pop('ignore_reset_requirements', None) # type: Optional[bool]
prefer = "return=representation"
api_version = "2021-04-30-Preview"
@@ -169,10 +169,10 @@ def build_create_or_update_request(
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
+ if skip_indexer_reset_requirement_for_cache is not None:
+ query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, 'bool')
if disable_cache_reprocessing_change_detection is not None:
query_parameters['disableCacheReprocessingChangeDetection'] = _SERIALIZER.query("disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, 'bool')
- if ignore_reset_requirements is not None:
- query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("ignore_reset_requirements", ignore_reset_requirements, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
@@ -431,18 +431,19 @@ def reset(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.reset.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -456,7 +457,7 @@ def reset_docs(
self,
indexer_name, # type: str
overwrite=False, # type: Optional[bool]
- keys_or_ids=None, # type: Optional["_models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema"]
+ keys_or_ids=None, # type: Optional["_models.DocumentKeysOrIds"]
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
@@ -469,8 +470,7 @@ def reset_docs(
keys or ids in this payload will be queued to be re-ingested.
:type overwrite: bool
:param keys_or_ids:
- :type keys_or_ids:
- ~azure.search.documents.indexes.models.Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema
+ :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -490,7 +490,7 @@ def reset_docs(
if request_options is not None:
_x_ms_client_request_id = request_options.x_ms_client_request_id
if keys_or_ids is not None:
- json = self._serialize.body(keys_or_ids, 'Paths1Cj7DxmIndexersIndexernameSearchResetdocsPostRequestbodyContentApplicationJsonSchema')
+ json = self._serialize.body(keys_or_ids, 'DocumentKeysOrIds')
else:
json = None
@@ -501,18 +501,19 @@ def reset_docs(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.reset_docs.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -554,18 +555,19 @@ def run(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.run.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -581,8 +583,8 @@ def create_or_update(
indexer, # type: "_models.SearchIndexer"
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
+ skip_indexer_reset_requirement_for_cache=None, # type: Optional[bool]
disable_cache_reprocessing_change_detection=None, # type: Optional[bool]
- ignore_reset_requirements=None, # type: Optional[bool]
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
@@ -599,11 +601,11 @@ def create_or_update(
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
+ :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :type skip_indexer_reset_requirement_for_cache: bool
:param disable_cache_reprocessing_change_detection: Disables cache reprocessing change
detection.
:type disable_cache_reprocessing_change_detection: bool
- :param ignore_reset_requirements: Ignores cache reset requirements.
- :type ignore_reset_requirements: bool
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -630,22 +632,23 @@ def create_or_update(
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
+ skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache,
disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection,
- ignore_reset_requirements=ignore_reset_requirements,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -705,18 +708,19 @@ def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -758,18 +762,19 @@ def get(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexer', pipeline_response)
@@ -817,18 +822,19 @@ def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListIndexersResult', pipeline_response)
@@ -878,18 +884,19 @@ def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexer', pipeline_response)
@@ -935,18 +942,19 @@ def get_status(
indexer_name=indexer_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get_status.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerStatus', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py
index c5a7e11c1930..fceea50c44a2 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py
@@ -13,12 +13,12 @@
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
-from azure.core.pipeline.transport._base import _format_url_section
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
+from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -358,18 +358,19 @@ def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndex', pipeline_response)
@@ -418,7 +419,8 @@ def prepare_request(next_link=None):
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
@@ -433,7 +435,8 @@ def prepare_request(next_link=None):
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=next_link,
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
@@ -460,7 +463,7 @@ def get_next(next_link=None):
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
@@ -530,18 +533,19 @@ def create_or_update(
if_none_match=if_none_match,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -603,18 +607,19 @@ def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -656,18 +661,19 @@ def get(
index_name=index_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndex', pipeline_response)
@@ -713,18 +719,19 @@ def get_statistics(
index_name=index_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get_statistics.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('GetIndexStatisticsResult', pipeline_response)
@@ -778,18 +785,19 @@ def analyze(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.analyze.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AnalyzeResult', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py
index c717faa75ed0..48151d99bb06 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_client_operations.py
@@ -17,6 +17,7 @@
from msrest import Serializer
from .. import models as _models
+from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -89,18 +90,19 @@ def get_service_statistics(
request = build_get_service_statistics_request(
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get_service_statistics.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ServiceStatistics', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py
index 55ca1000e361..5c212d7074c0 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py
@@ -12,16 +12,16 @@
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
-from azure.core.pipeline.transport._base import _format_url_section
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
+from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
+ from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -38,8 +38,8 @@ def build_create_or_update_request(
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
if_match = kwargs.pop('if_match', None) # type: Optional[str]
if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str]
+ skip_indexer_reset_requirement_for_cache = kwargs.pop('skip_indexer_reset_requirement_for_cache', None) # type: Optional[bool]
disable_cache_reprocessing_change_detection = kwargs.pop('disable_cache_reprocessing_change_detection', None) # type: Optional[bool]
- ignore_reset_requirements = kwargs.pop('ignore_reset_requirements', None) # type: Optional[bool]
prefer = "return=representation"
api_version = "2021-04-30-Preview"
@@ -55,10 +55,10 @@ def build_create_or_update_request(
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
+ if skip_indexer_reset_requirement_for_cache is not None:
+ query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, 'bool')
if disable_cache_reprocessing_change_detection is not None:
query_parameters['disableCacheReprocessingChangeDetection'] = _SERIALIZER.query("disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, 'bool')
- if ignore_reset_requirements is not None:
- query_parameters['ignoreResetRequirements'] = _SERIALIZER.query("ignore_reset_requirements", ignore_reset_requirements, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
@@ -225,6 +225,45 @@ def build_create_request(
**kwargs
)
+
+def build_reset_skills_request(
+ skillset_name, # type: str
+ **kwargs # type: Any
+):
+ # type: (...) -> HttpRequest
+ content_type = kwargs.pop('content_type', None) # type: Optional[str]
+ x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
+
+ api_version = "2021-04-30-Preview"
+ accept = "application/json"
+ # Construct URL
+ url = kwargs.pop("template_url", '/skillsets(\'{skillsetName}\')/search.resetskills')
+ path_format_arguments = {
+ "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, 'str'),
+ }
+
+ url = _format_url_section(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
+ query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
+ if x_ms_client_request_id is not None:
+ header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
+ if content_type is not None:
+ header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
+
+ return HttpRequest(
+ method="POST",
+ url=url,
+ params=query_parameters,
+ headers=header_parameters,
+ **kwargs
+ )
+
# fmt: on
class SkillsetsOperations(object):
"""SkillsetsOperations operations.
@@ -255,8 +294,8 @@ def create_or_update(
skillset, # type: "_models.SearchIndexerSkillset"
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
+ skip_indexer_reset_requirement_for_cache=None, # type: Optional[bool]
disable_cache_reprocessing_change_detection=None, # type: Optional[bool]
- ignore_reset_requirements=None, # type: Optional[bool]
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
@@ -274,11 +313,11 @@ def create_or_update(
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
+ :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :type skip_indexer_reset_requirement_for_cache: bool
:param disable_cache_reprocessing_change_detection: Disables cache reprocessing change
detection.
:type disable_cache_reprocessing_change_detection: bool
- :param ignore_reset_requirements: Ignores cache reset requirements.
- :type ignore_reset_requirements: bool
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
@@ -305,22 +344,23 @@ def create_or_update(
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
+ skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache,
disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection,
- ignore_reset_requirements=ignore_reset_requirements,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -380,18 +420,19 @@ def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -433,18 +474,19 @@ def get(
skillset_name=skillset_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response)
@@ -492,18 +534,19 @@ def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListSkillsetsResult', pipeline_response)
@@ -553,18 +596,19 @@ def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response)
@@ -576,3 +620,66 @@ def create(
create.metadata = {'url': '/skillsets'} # type: ignore
+
+ @distributed_trace
+ def reset_skills(
+ self,
+ skillset_name, # type: str
+ skill_names=None, # type: Optional[List[str]]
+ request_options=None, # type: Optional["_models.RequestOptions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Reset an existing skillset in a search service.
+
+ :param skillset_name: The name of the skillset to reset.
+ :type skillset_name: str
+ :param skill_names: the names of skills to be reset.
+ :type skill_names: list[str]
+ :param request_options: Parameter group.
+ :type request_options: ~azure.search.documents.indexes.models.RequestOptions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
+
+ _x_ms_client_request_id = None
+ if request_options is not None:
+ _x_ms_client_request_id = request_options.x_ms_client_request_id
+ _skill_names = _models.SkillNames(skill_names=skill_names)
+ json = self._serialize.body(_skill_names, 'SkillNames')
+
+ request = build_reset_skills_request(
+ skillset_name=skillset_name,
+ content_type=content_type,
+ x_ms_client_request_id=_x_ms_client_request_id,
+ json=json,
+ template_url=self.reset_skills.metadata['url'],
+ )
+ request = _convert_request(request)
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
+ }
+ request.url = self._client.format_url(request.url, **path_format_arguments)
+
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
+ raise HttpResponseError(response=response, model=error)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ reset_skills.metadata = {'url': '/skillsets(\'{skillsetName}\')/search.resetskills'} # type: ignore
+
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py
index 0ce0864491c3..fd1190e573c3 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py
@@ -12,12 +12,12 @@
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
-from azure.core.pipeline.transport._base import _format_url_section
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
+from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -293,18 +293,19 @@ def create_or_update(
if_none_match=if_none_match,
json=json,
template_url=self.create_or_update.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
@@ -364,18 +365,19 @@ def delete(
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
@@ -417,18 +419,19 @@ def get(
synonym_map_name=synonym_map_name,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynonymMap', pipeline_response)
@@ -476,18 +479,19 @@ def list(
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListSynonymMapsResult', pipeline_response)
@@ -537,18 +541,19 @@ def create(
x_ms_client_request_id=_x_ms_client_request_id,
json=json,
template_url=self.create.metadata['url'],
- )._to_pipeline_transport_request()
+ )
+ request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
- pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize.failsafe_deserialize(_models.SearchError, response)
+ error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynonymMap', pipeline_response)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py
index 27a083e917f6..c7e55716c554 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py
@@ -114,6 +114,11 @@ def create_or_update_indexer(self, indexer, **kwargs):
:param indexer: The definition of the indexer to create or update.
:type indexer: ~azure.search.documents.indexes.models.SearchIndexer
+ :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :paramtype skip_indexer_reset_requirement_for_cache: bool
+ :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change
+ detection.
+ :paramtype disable_cache_reprocessing_change_detection: bool
:return: The created IndexSearchIndexerer
:rtype: ~azure.search.documents.indexes.models.SearchIndexer
"""
@@ -330,9 +335,12 @@ def create_or_update_data_source_connection(self, data_source_connection, **kwar
:type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection
:keyword match_condition: The match condition to use upon the etag
:type match_condition: ~azure.core.MatchConditions
+ :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :paramtype skip_indexer_reset_requirement_for_cache: bool
:return: The created SearchIndexerDataSourceConnection
:rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection
"""
+
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
error_map, access_condition = get_access_conditions(
data_source_connection,
@@ -584,6 +592,11 @@ def create_or_update_skillset(self, skillset, **kwargs):
:type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset
:keyword match_condition: The match condition to use upon the etag
:type match_condition: ~azure.core.MatchConditions
+ :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :paramtype skip_indexer_reset_requirement_for_cache: bool
+ :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change
+ detection.
+ :paramtype disable_cache_reprocessing_change_detection: bool
:return: The created or updated SearchIndexerSkillset
:rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py
index d8171269bcf6..d0e589f9a39e 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py
@@ -115,6 +115,11 @@ async def create_or_update_indexer(self, indexer, **kwargs):
:param indexer: The definition of the indexer to create or update.
:type indexer: ~azure.search.documents.indexes.models.SearchIndexer
+ :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :paramtype skip_indexer_reset_requirement_for_cache: bool
+ :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change
+ detection.
+ :paramtype disable_cache_reprocessing_change_detection: bool
:return: The created SearchIndexer
:rtype: ~azure.search.documents.indexes.models.SearchIndexer
"""
@@ -324,6 +329,8 @@ async def create_or_update_data_source_connection(
:type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection
:keyword match_condition: The match condition to use upon the etag
:type match_condition: ~azure.core.MatchConditions
+ :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :paramtype skip_indexer_reset_requirement_for_cache: bool
:return: The created SearchIndexerDataSourceConnection
:rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection
"""
@@ -571,6 +578,11 @@ async def create_or_update_skillset(self, skillset, **kwargs):
:type skillset: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset`
:keyword match_condition: The match condition to use upon the etag
:type match_condition: ~azure.core.MatchConditions
+ :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements.
+ :paramtype skip_indexer_reset_requirement_for_cache: bool
+ :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change
+ detection.
+ :paramtype disable_cache_reprocessing_change_detection: bool
:return: The created or updated SearchIndexerSkillset
:rtype: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset`
diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py
index b1d2c678d771..c93b4ac66b9c 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py
@@ -25,32 +25,32 @@
# --------------------------------------------------------------------------
from .._generated.models import (
- Answers,
AnswerResult,
AutocompleteMode,
- Captions,
CaptionResult,
IndexAction,
IndexingResult,
+ QueryAnswerType,
+ QueryCaptionType,
QueryLanguage,
+ QuerySpellerType,
QueryType,
SearchMode,
- Speller,
)
from .._search_client import odata
__all__ = (
- "Answers",
"AnswerResult",
"AutocompleteMode",
- "Captions",
"CaptionResult",
"IndexAction",
"IndexingResult",
"odata",
+ "QueryAnswerType",
+ "QueryCaptionType",
"QueryLanguage",
+ "QuerySpellerType",
"QueryType",
"SearchMode",
- "Speller",
)
diff --git a/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py b/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py
index 5b14f39da581..b52f21b5fba3 100644
--- a/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py
+++ b/sdk/search/azure-search-documents/samples/async_samples/sample_semantic_search_async.py
@@ -35,7 +35,7 @@ async def speller():
client = SearchClient(endpoint=endpoint,
index_name=index_name,
credential=credential)
- results = await client.search(search_text="luxucy", query_language="en-us", speller="lexicon")
+ results = await client.search(search_text="luxucy", query_language="en-us", query_speller="lexicon")
async for result in results:
print("{}\n{}\n)".format(result["HotelId"], result["HotelName"]))
diff --git a/sdk/search/azure-search-documents/samples/sample_semantic_search.py b/sdk/search/azure-search-documents/samples/sample_semantic_search.py
index 6713b25fa4d3..9acdb6e1f6bb 100644
--- a/sdk/search/azure-search-documents/samples/sample_semantic_search.py
+++ b/sdk/search/azure-search-documents/samples/sample_semantic_search.py
@@ -34,7 +34,7 @@ def speller():
client = SearchClient(endpoint=endpoint,
index_name=index_name,
credential=credential)
- results = list(client.search(search_text="luxucy", query_language="en-us", speller="lexicon"))
+ results = list(client.search(search_text="luxucy", query_language="en-us", query_speller="lexicon"))
for result in results:
print("{}\n{}\n)".format(result["HotelId"], result["HotelName"]))
diff --git a/sdk/search/azure-search-documents/setup.py b/sdk/search/azure-search-documents/setup.py
index 446aae1aa38c..bcaace8b39ba 100644
--- a/sdk/search/azure-search-documents/setup.py
+++ b/sdk/search/azure-search-documents/setup.py
@@ -67,6 +67,7 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
@@ -78,7 +79,7 @@
'azure.search',
]),
install_requires=[
- "azure-core<2.0.0,>=1.18.0",
+ "azure-core<2.0.0,>=1.19.0",
"msrest>=0.6.21",
"azure-common~=1.1",
"typing-extensions"
diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/policies.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/policies.py
index 11fc9849998a..00068a7ba112 100644
--- a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/policies.py
+++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/policies.py
@@ -75,7 +75,7 @@ def retry_hook(settings, **kwargs):
def is_retry(response, mode):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
@@ -456,7 +456,7 @@ def increment(self, settings, request, response=None, error=None):
else:
# Incrementing because of a server error like a 500 in
- # status_forcelist and a the given method is in the whitelist
+ # status_forcelist and the given method is in the allowlist
if response:
settings['status'] -= 1
settings['history'].append(RequestHistory(request, http_response=response))
diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/policies.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/policies.py
index 11fc9849998a..00068a7ba112 100644
--- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/policies.py
+++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/policies.py
@@ -75,7 +75,7 @@ def retry_hook(settings, **kwargs):
def is_retry(response, mode):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
@@ -456,7 +456,7 @@ def increment(self, settings, request, response=None, error=None):
else:
# Incrementing because of a server error like a 500 in
+ # status_forcelist and the given method is in the allowlist
+ # status_forcelist and a the given method is in the allowlist
if response:
settings['status'] -= 1
settings['history'].append(RequestHistory(request, http_response=response))
diff --git a/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/policies.py b/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/policies.py
index 11fc9849998a..00068a7ba112 100644
--- a/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/policies.py
+++ b/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/policies.py
@@ -75,7 +75,7 @@ def retry_hook(settings, **kwargs):
def is_retry(response, mode):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
@@ -456,7 +456,7 @@ def increment(self, settings, request, response=None, error=None):
else:
# Incrementing because of a server error like a 500 in
- # status_forcelist and a the given method is in the whitelist
+ # status_forcelist and the given method is in the allowlist
if response:
settings['status'] -= 1
settings['history'].append(RequestHistory(request, http_response=response))
diff --git a/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/policies.py b/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/policies.py
index 11fc9849998a..00068a7ba112 100644
--- a/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/policies.py
+++ b/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/policies.py
@@ -75,7 +75,7 @@ def retry_hook(settings, **kwargs):
def is_retry(response, mode):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
@@ -456,7 +456,7 @@ def increment(self, settings, request, response=None, error=None):
else:
# Incrementing because of a server error like a 500 in
- # status_forcelist and a the given method is in the whitelist
+ # status_forcelist and the given method is in the allowlist
if response:
settings['status'] -= 1
settings['history'].append(RequestHistory(request, http_response=response))
diff --git a/sdk/synapse/azure-synapse-accesscontrol/README.md b/sdk/synapse/azure-synapse-accesscontrol/README.md
index 5238d56b16d7..def1c2847aad 100644
--- a/sdk/synapse/azure-synapse-accesscontrol/README.md
+++ b/sdk/synapse/azure-synapse-accesscontrol/README.md
@@ -4,15 +4,14 @@ This is the Microsoft Azure Synapse AccessControl Client Library.
This package has been tested with Python 2.7, 3.6, 3.7, 3.8 and 3.9.
For a more complete view of Azure libraries, see the [azure sdk python release](https://aka.ms/azsdk/python/all).
+## _Disclaimer_
-# Usage
-
-
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+# Usage
For code examples, see [Synapse AccessControl](https://docs.microsoft.com/python/api/overview/azure/) on docs.microsoft.com.
-
# Provide Feedback
If you encounter any bugs or have suggestions, please file an issue in the
diff --git a/sdk/synapse/azure-synapse-accesscontrol/setup.py b/sdk/synapse/azure-synapse-accesscontrol/setup.py
index 771f5c1e4103..374016253dc9 100644
--- a/sdk/synapse/azure-synapse-accesscontrol/setup.py
+++ b/sdk/synapse/azure-synapse-accesscontrol/setup.py
@@ -71,6 +71,7 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
diff --git a/sdk/synapse/azure-synapse-artifacts/README.md b/sdk/synapse/azure-synapse-artifacts/README.md
index 66c14aaf08be..218a58cb00ac 100644
--- a/sdk/synapse/azure-synapse-artifacts/README.md
+++ b/sdk/synapse/azure-synapse-artifacts/README.md
@@ -4,20 +4,18 @@ This is the Microsoft Azure Synapse Artifacts Client Library.
This package has been tested with Python 2.7, 3.6, 3.7, 3.8 and 3.9.
For a more complete view of Azure libraries, see the [azure sdk python release](https://aka.ms/azsdk/python/all).
+## _Disclaimer_
-# Usage
-
-
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+# Usage
For code examples, see [Synapse Artifacts](https://docs.microsoft.com/python/api/overview/azure/) on docs.microsoft.com.
-
# Provide Feedback
If you encounter any bugs or have suggestions, please file an issue in the
[Issues](https://github.com/Azure/azure-sdk-for-python/issues)
section of the project.
-
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-synapse-artifacts%2FREADME.png)
diff --git a/sdk/synapse/azure-synapse-artifacts/setup.py b/sdk/synapse/azure-synapse-artifacts/setup.py
index df7989bdbbab..5b77e11d2441 100644
--- a/sdk/synapse/azure-synapse-artifacts/setup.py
+++ b/sdk/synapse/azure-synapse-artifacts/setup.py
@@ -71,6 +71,7 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
diff --git a/sdk/synapse/azure-synapse-monitoring/README.md b/sdk/synapse/azure-synapse-monitoring/README.md
index f5f8f081b36b..7fa99cef1a75 100644
--- a/sdk/synapse/azure-synapse-monitoring/README.md
+++ b/sdk/synapse/azure-synapse-monitoring/README.md
@@ -4,20 +4,18 @@ This is the Microsoft Azure Synapse Monitoring Client Library.
This package has been tested with Python 2.7, 3.6, 3.7, 3.8 and 3.9.
For a more complete view of Azure libraries, see the [azure sdk python release](https://aka.ms/azsdk/python/all).
+## _Disclaimer_
-# Usage
-
-
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+# Usage
For code examples, see [Synapse Monitoring](https://docs.microsoft.com/python/api/overview/azure/) on docs.microsoft.com.
-
# Provide Feedback
If you encounter any bugs or have suggestions, please file an issue in the
[Issues](https://github.com/Azure/azure-sdk-for-python/issues)
section of the project.
-
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-synapse-monitoring%2FREADME.png)
diff --git a/sdk/synapse/azure-synapse-monitoring/setup.py b/sdk/synapse/azure-synapse-monitoring/setup.py
index fe6cbfd07e18..69b9ea42975a 100644
--- a/sdk/synapse/azure-synapse-monitoring/setup.py
+++ b/sdk/synapse/azure-synapse-monitoring/setup.py
@@ -71,6 +71,7 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
diff --git a/sdk/synapse/azure-synapse-spark/README.md b/sdk/synapse/azure-synapse-spark/README.md
index 0efdc1b23548..38027e4956ea 100644
--- a/sdk/synapse/azure-synapse-spark/README.md
+++ b/sdk/synapse/azure-synapse-spark/README.md
@@ -4,20 +4,18 @@ This is the Microsoft Azure Synapse Spark Client Library.
This package has been tested with Python 2.7, 3.6, 3.7, 3.8 and 3.9.
For a more complete view of Azure libraries, see the [azure sdk python release](https://aka.ms/azsdk/python/all).
+## _Disclaimer_
-# Usage
-
-
+_Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_
+# Usage
For code examples, see [Synapse Spark](https://docs.microsoft.com/python/api/overview/azure/) on docs.microsoft.com.
-
# Provide Feedback
If you encounter any bugs or have suggestions, please file an issue in the
[Issues](https://github.com/Azure/azure-sdk-for-python/issues)
section of the project.
-
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-synapse-spark%2FREADME.png)
diff --git a/sdk/synapse/azure-synapse-spark/setup.py b/sdk/synapse/azure-synapse-spark/setup.py
index ab3c8302fbc0..7953222cddce 100644
--- a/sdk/synapse/azure-synapse-spark/setup.py
+++ b/sdk/synapse/azure-synapse-spark/setup.py
@@ -71,6 +71,7 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py
index ad5045703369..1cb05388280d 100644
--- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py
+++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py
@@ -133,7 +133,7 @@ def __init__(self, **kwargs):
self.retry_to_secondary = kwargs.get('retry_to_secondary', False)
def is_retry(self, settings, response):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py
index 96139f7c5b4e..5f23dece3524 100644
--- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py
+++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py
@@ -56,7 +56,7 @@ def __init__(self, **kwargs):
self.retry_to_secondary = kwargs.get('retry_to_secondary', False)
def is_retry(self, settings, response):
- """Is this method/status code retryable? (Based on whitelists and control
+ """Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
diff --git a/shared_requirements.txt b/shared_requirements.txt
index 11035e2303e0..3c9254069d21 100644
--- a/shared_requirements.txt
+++ b/shared_requirements.txt
@@ -151,9 +151,9 @@ backports.functools-lru-cache>=1.6.4
#override azure-keyvault-secrets azure-core<2.0.0,>=1.7.0
#override azure-ai-textanalytics msrest>=0.6.21
#override azure-ai-textanalytics azure-core<2.0.0,>=1.14.0
-#override azure-ai-language-questionanswering azure-core<2.0.0,>=1.16.0
+#override azure-ai-language-questionanswering azure-core<2.0.0,>=1.19.0
#override azure-ai-language-questionanswering msrest>=0.6.21
-#override azure-search-documents azure-core<2.0.0,>=1.18.0
+#override azure-search-documents azure-core<2.0.0,>=1.19.0
#override azure-ai-formrecognizer msrest>=0.6.21
#override azure-ai-formrecognizer azure-core<2.0.0,>=1.13.0
#override azure-storage-blob azure-core<2.0.0,>=1.10.0
diff --git a/tools/vcrpy/vcr/stubs/__init__.py b/tools/vcrpy/vcr/stubs/__init__.py
index 37ab137ce477..264cfaf461ea 100644
--- a/tools/vcrpy/vcr/stubs/__init__.py
+++ b/tools/vcrpy/vcr/stubs/__init__.py
@@ -26,7 +26,7 @@ def settimeout(self, *args, **kwargs):
def fileno(self):
"""
- This is kinda crappy. requests will watch
+ This is not very good. requests will watch
this descriptor and make sure it's not closed.
Return file descriptor 0 since that's stdin.
"""