feat(checks): update the api
#### checks:v1alpha

The following keys were added:
- resources.aisafety.methods.classifyContent (Total Keys: 7)
- schemas.GoogleChecksAisafetyV1alphaClassifyContentRequest (Total Keys: 18)
- schemas.GoogleChecksAisafetyV1alphaClassifyContentResponse (Total Keys: 10)
- schemas.GoogleChecksAisafetyV1alphaTextInput (Total Keys: 4)
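
A minimal sketch of calling the new method from the Python client, assuming the Checks API is enabled for the project and application-default credentials are available (the content and policy values below are illustrative):

```python
from googleapiclient.discovery import build

# Build a Checks API client; assumes application-default credentials
# and that the Checks API is enabled for the project.
service = build("checks", "v1alpha")

body = {
    "input": {
        "textInput": {
            "content": "Example model response to classify.",  # illustrative text
            "languageCode": "en",
        }
    },
    "policies": [
        {"policyType": "DANGEROUS_CONTENT"},
    ],
}

# classifyContent is the method added by this commit.
response = service.aisafety().classifyContent(body=body).execute()
for result in response.get("policyResults", []):
    print(result["policyType"], result.get("violationResult"), result.get("score"))
```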
yoshi-automation committed Oct 30, 2024
1 parent b153244 commit a8cfcf2
Showing 3 changed files with 339 additions and 1 deletion.
135 changes: 135 additions & 0 deletions docs/dyn/checks_v1alpha.aisafety.html
@@ -0,0 +1,135 @@
<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
margin: 0;
padding: 0;
border: 0;
font-weight: inherit;
font-style: inherit;
font-size: 100%;
font-family: inherit;
vertical-align: baseline;
}

body {
font-size: 13px;
padding: 1em;
}

h1 {
font-size: 26px;
margin-bottom: 1em;
}

h2 {
font-size: 24px;
margin-bottom: 1em;
}

h3 {
font-size: 20px;
margin-bottom: 1em;
margin-top: 1em;
}

pre, code {
line-height: 1.5;
font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
margin-top: 0.5em;
}

h1, h2, h3, p {
font-family: Arial, sans-serif;
}

h1, h2, h3 {
border-bottom: solid #CCC 1px;
}

.toc_element {
margin-top: 0.5em;
}

.firstline {
margin-left: 2em;
}

.method {
margin-top: 1em;
border: solid 1px #CCC;
padding: 1em;
background: #EEE;
}

.details {
font-weight: bold;
font-size: 14px;
}

</style>

<h1><a href="checks_v1alpha.html">Checks API</a> . <a href="checks_v1alpha.aisafety.html">aisafety</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
<code><a href="#classifyContent">classifyContent(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Analyze a piece of content with the provided set of policies.</p>
<p class="toc_element">
<code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<h3>Method Details</h3>
<div class="method">
<code class="details" id="classifyContent">classifyContent(body=None, x__xgafv=None)</code>
<pre>Analyze a piece of content with the provided set of policies.

Args:
body: object, The request body.
The object takes the form of:

{ # Request proto for ClassifyContent RPC.
&quot;classifierVersion&quot;: &quot;A String&quot;, # Optional. Version of the classifier to use. If not specified, the latest version will be used.
&quot;context&quot;: { # Context about the input that will be used to help on the classification. # Optional. Context about the input that will be used to help on the classification.
&quot;prompt&quot;: &quot;A String&quot;, # Optional. Prompt that generated the model response.
},
&quot;input&quot;: { # Content to be classified. # Required. Content to be classified.
&quot;textInput&quot;: { # Text input to be classified. # Content in text format.
&quot;content&quot;: &quot;A String&quot;, # Actual piece of text to be classified.
&quot;languageCode&quot;: &quot;A String&quot;, # Optional. Language of the text in ISO 639-1 format. If the language is invalid or not specified, the system will try to detect it.
},
},
&quot;policies&quot;: [ # Required. List of policies to classify against.
{ # List of policies to classify against.
&quot;policyType&quot;: &quot;A String&quot;, # Required. Type of the policy.
&quot;threshold&quot;: 3.14, # Optional. Score threshold to use when deciding if the content is violative or non-violative. If not specified, the default 0.5 threshold for the policy will be used.
},
],
}

x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
2 - v2 error format

Returns:
An object of the form:

{ # Response proto for ClassifyContent RPC.
&quot;policyResults&quot;: [ # Results of the classification for each policy.
{ # Result for one policy against the corresponding input.
&quot;policyType&quot;: &quot;A String&quot;, # Type of the policy.
&quot;score&quot;: 3.14, # Final score for the results of this policy.
&quot;violationResult&quot;: &quot;A String&quot;, # Result of the classification for the policy.
},
],
}</pre>
</div>

<div class="method">
<code class="details" id="close">close()</code>
<pre>Close httplib2 connections.</pre>
</div>

</body></html>
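
For reference, a request body that exercises more of the schema documented above might be assembled like this (a sketch only; the classifier version, prompt, content, and threshold values are illustrative):

```python
# Request body following the ClassifyContent request schema shown above.
body = {
    "classifierVersion": "STABLE",
    "context": {
        # Prompt that produced the model response being classified.
        "prompt": "How do I request a refund?",
    },
    "input": {
        "textInput": {
            "content": "You can request a refund within 30 days of purchase.",
            "languageCode": "en",
        }
    },
    "policies": [
        {"policyType": "HATE_SPEECH"},
        {"policyType": "PII_SOLICITING_RECITING"},
        # Override the default 0.5 threshold for this policy.
        {"policyType": "DANGEROUS_CONTENT", "threshold": 0.7},
    ],
}
```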
5 changes: 5 additions & 0 deletions docs/dyn/checks_v1alpha.html
@@ -79,6 +79,11 @@ <h2>Instance Methods</h2>
</p>
<p class="firstline">Returns the accounts Resource.</p>

<p class="toc_element">
<code><a href="checks_v1alpha.aisafety.html">aisafety()</a></code>
</p>
<p class="firstline">Returns the aisafety Resource.</p>

<p class="toc_element">
<code><a href="checks_v1alpha.media.html">media()</a></code>
</p>
200 changes: 199 additions & 1 deletion googleapiclient/discovery_cache/documents/checks.v1alpha.json
@@ -401,6 +401,25 @@
}
}
},
"aisafety": {
"methods": {
"classifyContent": {
"description": "Analyze a piece of content with the provided set of policies.",
"flatPath": "v1alpha/aisafety:classifyContent",
"httpMethod": "POST",
"id": "checks.aisafety.classifyContent",
"parameterOrder": [],
"parameters": {},
"path": "v1alpha/aisafety:classifyContent",
"request": {
"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequest"
},
"response": {
"$ref": "GoogleChecksAisafetyV1alphaClassifyContentResponse"
}
}
}
},
"media": {
"methods": {
"upload": {
@@ -444,7 +463,7 @@
}
}
},
"revision": "20241025",
"revision": "20241029",
"rootUrl": "https://checks.googleapis.com/",
"schemas": {
"CancelOperationRequest": {
@@ -492,6 +511,185 @@
},
"type": "object"
},
"GoogleChecksAisafetyV1alphaClassifyContentRequest": {
"description": "Request proto for ClassifyContent RPC.",
"id": "GoogleChecksAisafetyV1alphaClassifyContentRequest",
"properties": {
"classifierVersion": {
"description": "Optional. Version of the classifier to use. If not specified, the latest version will be used.",
"enum": [
"CLASSIFIER_VERSION_UNSPECIFIED",
"STABLE",
"LATEST"
],
"enumDescriptions": [
"Unspecified version.",
"Stable version.",
"Latest version."
],
"type": "string"
},
"context": {
"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestContext",
"description": "Optional. Context about the input that will be used to help on the classification."
},
"input": {
"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent",
"description": "Required. Content to be classified."
},
"policies": {
"description": "Required. List of policies to classify against.",
"items": {
"$ref": "GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig"
},
"type": "array"
}
},
"type": "object"
},
"GoogleChecksAisafetyV1alphaClassifyContentRequestContext": {
"description": "Context about the input that will be used to help on the classification.",
"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestContext",
"properties": {
"prompt": {
"description": "Optional. Prompt that generated the model response.",
"type": "string"
}
},
"type": "object"
},
"GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent": {
"description": "Content to be classified.",
"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestInputContent",
"properties": {
"textInput": {
"$ref": "GoogleChecksAisafetyV1alphaTextInput",
"description": "Content in text format."
}
},
"type": "object"
},
"GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig": {
"description": "List of policies to classify against.",
"id": "GoogleChecksAisafetyV1alphaClassifyContentRequestPolicyConfig",
"properties": {
"policyType": {
"description": "Required. Type of the policy.",
"enum": [
"POLICY_TYPE_UNSPECIFIED",
"DANGEROUS_CONTENT",
"PII_SOLICITING_RECITING",
"HARASSMENT",
"SEXUALLY_EXPLICIT",
"HATE_SPEECH",
"MEDICAL_INFO",
"VIOLENCE_AND_GORE",
"OBSCENITY_AND_PROFANITY"
],
"enumDescriptions": [
"Default.",
"The model facilitates, promotes or enables access to harmful goods, services, and activities.",
"The model reveals an individual\u2019s personal information and data.",
"The model generates content that is malicious, intimidating, bullying, or abusive towards another individual.",
"The model generates content that is sexually explicit in nature.",
"The model promotes violence, hatred, discrimination on the basis of race, religion, etc.",
"The model facilitates harm by providing health advice or guidance.",
"The model generates content that contains gratuitous, realistic descriptions of violence or gore.",
""
],
"type": "string"
},
"threshold": {
"description": "Optional. Score threshold to use when deciding if the content is violative or non-violative. If not specified, the default 0.5 threshold for the policy will be used.",
"format": "float",
"type": "number"
}
},
"type": "object"
},
"GoogleChecksAisafetyV1alphaClassifyContentResponse": {
"description": "Response proto for ClassifyContent RPC.",
"id": "GoogleChecksAisafetyV1alphaClassifyContentResponse",
"properties": {
"policyResults": {
"description": "Results of the classification for each policy.",
"items": {
"$ref": "GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult"
},
"type": "array"
}
},
"type": "object"
},
"GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult": {
"description": "Result for one policy against the corresponding input.",
"id": "GoogleChecksAisafetyV1alphaClassifyContentResponsePolicyResult",
"properties": {
"policyType": {
"description": "Type of the policy.",
"enum": [
"POLICY_TYPE_UNSPECIFIED",
"DANGEROUS_CONTENT",
"PII_SOLICITING_RECITING",
"HARASSMENT",
"SEXUALLY_EXPLICIT",
"HATE_SPEECH",
"MEDICAL_INFO",
"VIOLENCE_AND_GORE",
"OBSCENITY_AND_PROFANITY"
],
"enumDescriptions": [
"Default.",
"The model facilitates, promotes or enables access to harmful goods, services, and activities.",
"The model reveals an individual\u2019s personal information and data.",
"The model generates content that is malicious, intimidating, bullying, or abusive towards another individual.",
"The model generates content that is sexually explicit in nature.",
"The model promotes violence, hatred, discrimination on the basis of race, religion, etc.",
"The model facilitates harm by providing health advice or guidance.",
"The model generates content that contains gratuitous, realistic descriptions of violence or gore.",
""
],
"type": "string"
},
"score": {
"description": "Final score for the results of this policy.",
"format": "float",
"type": "number"
},
"violationResult": {
"description": "Result of the classification for the policy.",
"enum": [
"VIOLATION_RESULT_UNSPECIFIED",
"VIOLATIVE",
"NON_VIOLATIVE",
"CLASSIFICATION_ERROR"
],
"enumDescriptions": [
"Unspecified result.",
"The final score is greater or equal the input score threshold.",
"The final score is smaller than the input score threshold.",
"There was an error and the violation result could not be determined."
],
"type": "string"
}
},
"type": "object"
},
"GoogleChecksAisafetyV1alphaTextInput": {
"description": "Text input to be classified.",
"id": "GoogleChecksAisafetyV1alphaTextInput",
"properties": {
"content": {
"description": "Actual piece of text to be classified.",
"type": "string"
},
"languageCode": {
"description": "Optional. Language of the text in ISO 639-1 format. If the language is invalid or not specified, the system will try to detect it.",
"type": "string"
}
},
"type": "object"
},
"GoogleChecksReportV1alphaAnalyzeUploadRequest": {
"description": "The request message for ReportService.AnalyzeUpload.",
"id": "GoogleChecksReportV1alphaAnalyzeUploadRequest",

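Based on the VIOLATION_RESULT descriptions in the discovery document above (VIOLATIVE means the final score met or exceeded the policy's threshold), a small helper for picking out flagged policies from a response could look like this (a sketch; the function name is illustrative):

```python
def flagged_policies(response):
    """Return the policy types whose violationResult is VIOLATIVE,
    i.e. whose final score met or exceeded the policy's threshold."""
    return [
        result["policyType"]
        for result in response.get("policyResults", [])
        if result.get("violationResult") == "VIOLATIVE"
    ]
```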