
Commit cb25db6
Committed by docsreference@microsoft.com
1 parent: 51c1140

1,348 files changed (+15602 −15563 lines)


docs-ref-autogen/com.azure.ai.contentsafety.BlocklistAsyncClient.yml (+11 −11; large diff not rendered)

docs-ref-autogen/com.azure.ai.contentsafety.ContentSafetyAsyncClient.yml (+7 −7)
```diff
@@ -35,7 +35,7 @@ methods:
   desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
   returns:
     description: "the image analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
-    type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
+    type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
 - uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(com.azure.core.util.BinaryData)"
   fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(BinaryData content)"
   name: "analyzeImage(BinaryData content)"
```
```diff
@@ -49,7 +49,7 @@ methods:
   desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
   returns:
     description: "the image analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
-    type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
+    type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
 - uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(java.lang.String)"
   fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImage(String blobUrl)"
   name: "analyzeImage(String blobUrl)"
```
```diff
@@ -63,7 +63,7 @@ methods:
   desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
   returns:
     description: "the image analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
-    type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
+    type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeImageResult?alt=com.azure.ai.contentsafety.models.AnalyzeImageResult&text=AnalyzeImageResult\" data-throw-if-not-resolved=\"False\" />&gt;"
 - uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImageWithResponse(com.azure.core.util.BinaryData,com.azure.core.http.rest.RequestOptions)"
   fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
   name: "analyzeImageWithResponse(BinaryData options, RequestOptions requestOptions)"
```
```diff
@@ -80,7 +80,7 @@ methods:
   desc: "Analyze Image A synchronous API for the analysis of potentially harmful image content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence.\n\n**Request Body Schema**\n\n```java\n{\n image (Required): {\n content: byte[] (Optional)\n blobUrl: String (Optional)\n }\n categories (Optional): [\n String(Hate/SelfHarm/Sexual/Violence) (Optional)\n ]\n outputType: String(FourSeverityLevels) (Optional)\n }\n```\n\n**Response Body Schema**\n\n```java\n{\n categoriesAnalysis (Required): [\n (Required){\n category: String(Hate/SelfHarm/Sexual/Violence) (Required)\n severity: Integer (Optional)\n }\n ]\n }\n```"
   returns:
     description: "the image analysis response along with <xref uid=\"com.azure.core.http.rest.Response\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Response\"></xref> on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
-    type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />&gt;&gt;"
+    type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />&gt;&gt;"
 - uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(com.azure.ai.contentsafety.models.AnalyzeTextOptions)"
   fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(AnalyzeTextOptions options)"
   name: "analyzeText(AnalyzeTextOptions options)"
```
```diff
@@ -94,7 +94,7 @@ methods:
   desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
   returns:
     description: "the text analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
-    type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />&gt;"
+    type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />&gt;"
 - uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(java.lang.String)"
   fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeText(String text)"
   name: "analyzeText(String text)"
```
```diff
@@ -108,7 +108,7 @@ methods:
   desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence."
   returns:
     description: "the text analysis response on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
-    type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />&gt;"
+    type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.ai.contentsafety.models.AnalyzeTextResult?alt=com.azure.ai.contentsafety.models.AnalyzeTextResult&text=AnalyzeTextResult\" data-throw-if-not-resolved=\"False\" />&gt;"
 - uid: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeTextWithResponse(com.azure.core.util.BinaryData,com.azure.core.http.rest.RequestOptions)"
   fullName: "com.azure.ai.contentsafety.ContentSafetyAsyncClient.analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
   name: "analyzeTextWithResponse(BinaryData options, RequestOptions requestOptions)"
```
```diff
@@ -125,7 +125,7 @@ methods:
   desc: "Analyze Text A synchronous API for the analysis of potentially harmful text content. Currently, it supports four categories: Hate, SelfHarm, Sexual, and Violence.\n\n**Request Body Schema**\n\n```java\n{\n text: String (Required)\n categories (Optional): [\n String(Hate/SelfHarm/Sexual/Violence) (Optional)\n ]\n blocklistNames (Optional): [\n String (Optional)\n ]\n haltOnBlocklistHit: Boolean (Optional)\n outputType: String(FourSeverityLevels/EightSeverityLevels) (Optional)\n }\n```\n\n**Response Body Schema**\n\n```java\n{\n blocklistsMatch (Optional): [\n (Optional){\n blocklistName: String (Required)\n blocklistItemId: String (Required)\n blocklistItemText: String (Required)\n }\n ]\n categoriesAnalysis (Required): [\n (Required){\n category: String(Hate/SelfHarm/Sexual/Violence) (Required)\n severity: Integer (Optional)\n }\n ]\n }\n```"
   returns:
     description: "the text analysis response along with <xref uid=\"com.azure.core.http.rest.Response\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Response\"></xref> on successful completion of <xref uid=\"reactor.core.publisher.Mono\" data-throw-if-not-resolved=\"false\" data-raw-source=\"Mono\"></xref>."
-    type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />&gt;&gt;"
+    type: "<a href=\"https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html\">Mono</a>&lt;<xref href=\"com.azure.core.http.rest.Response?alt=com.azure.core.http.rest.Response&text=Response\" data-throw-if-not-resolved=\"False\" />&lt;<xref href=\"com.azure.core.util.BinaryData?alt=com.azure.core.util.BinaryData&text=BinaryData\" data-throw-if-not-resolved=\"False\" />&gt;&gt;"
 type: "class"
 desc: "Initializes a new instance of the asynchronous ContentSafetyClient type."
 metadata: {}
```
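
As with the image variant, `analyzeTextWithResponse(BinaryData, RequestOptions)` accepts the raw JSON of the Request Body Schema in this final hunk. A sketch continuing in the same `main` method; the blocklist name `MyBlocklist` is illustrative, not from this commit:

```java
// Request body per the Request Body Schema above; "MyBlocklist" is a placeholder.
BinaryData textRequest = BinaryData.fromString(
    "{\"text\":\"Sample text to screen.\","
        + "\"blocklistNames\":[\"MyBlocklist\"],"
        + "\"haltOnBlocklistHit\":true}");

// Mono<Response<BinaryData>>; getValue() holds the raw JSON analysis result.
client.analyzeTextWithResponse(textRequest, new RequestOptions())
    .subscribe(response -> System.out.println(response.getValue()));
```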
