
Commit

Google APIs updated at 2025-02-02 (#184)
Co-authored-by: GitHub Actions <>
github-actions[bot] authored Feb 2, 2025
1 parent dcf3c5b commit e097c1d
Showing 53 changed files with 20,210 additions and 729 deletions.
19 changes: 16 additions & 3 deletions gcloud-sdk/genproto/google.ai.generativelanguage.v1.rs
@@ -315,7 +315,7 @@ impl HarmCategory {
 pub struct GenerateContentRequest {
     /// Required. The name of the `Model` to use for generating the completion.
     ///
-    /// Format: `name=models/{model}`.
+    /// Format: `models/{model}`.
     #[prost(string, tag = "1")]
     pub model: ::prost::alloc::string::String,
     /// Required. The content of the current conversation with the model.
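The corrected doc comment drops the stray `name=` prefix; the field value is just the resource path. A minimal sketch of populating the request, assuming the crate's usual `gcloud_sdk::google::...::v1` module path (the model ID below is only an illustrative placeholder, not taken from this commit):

    use gcloud_sdk::google::ai::generativelanguage::v1::GenerateContentRequest;

    // prost-generated messages implement Default, so unset fields can be elided.
    let request = GenerateContentRequest {
        model: "models/gemini-1.5-flash".to_string(), // `models/{model}`, no `name=` prefix
        ..Default::default()
    };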
@@ -338,8 +338,8 @@ pub struct GenerateContentRequest {
     /// `SafetyCategory` provided in the list, the API will use the default safety
     /// setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH,
     /// HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT,
-    /// HARM_CATEGORY_HARASSMENT are supported. Refer to the
-    /// [guide](<https://ai.google.dev/gemini-api/docs/safety-settings>)
+    /// HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_CIVIC_INTEGRITY are supported.
+    /// Refer to the [guide](<https://ai.google.dev/gemini-api/docs/safety-settings>)
     /// for detailed information on available safety settings. Also refer to the
     /// [Safety guidance](<https://ai.google.dev/gemini-api/docs/safety-guidance>) to
     /// learn how to incorporate safety considerations in your AI applications.
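With HARM_CATEGORY_CIVIC_INTEGRITY now documented as supported, a request can carry a safety setting for it. A sketch under stated assumptions: the `HarmCategory::CivicIntegrity` variant, the `SafetySetting` message, and its nested `safety_setting::HarmBlockThreshold` enum (with a `BlockMediumAndAbove` value) are generated elsewhere in this file and are not shown in this excerpt:

    use gcloud_sdk::google::ai::generativelanguage::v1::{HarmCategory, SafetySetting};
    use gcloud_sdk::google::ai::generativelanguage::v1::safety_setting::HarmBlockThreshold;

    // prost stores enum-typed message fields as i32, hence the casts.
    let civic_setting = SafetySetting {
        category: HarmCategory::CivicIntegrity as i32,       // assumed variant
        threshold: HarmBlockThreshold::BlockMediumAndAbove as i32, // assumed variant
    };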
@@ -449,6 +449,10 @@ pub struct GenerationConfig {
     /// [Candidate.logprobs_result][google.ai.generativelanguage.v1.Candidate.logprobs_result].
     #[prost(int32, optional, tag = "18")]
     pub logprobs: ::core::option::Option<i32>,
+    /// Optional. Enables enhanced civic answers. It may not be available for all
+    /// models.
+    #[prost(bool, optional, tag = "19")]
+    pub enable_enhanced_civic_answers: ::core::option::Option<bool>,
 }
 /// Response from the model supporting multiple candidate responses.
 ///
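The new field is optional at the proto level, so existing callers are unaffected and opting in is a one-liner. A minimal sketch (only the tag-19 field is new in this commit; `logprobs` is shown for context):

    use gcloud_sdk::google::ai::generativelanguage::v1::GenerationConfig;

    let config = GenerationConfig {
        logprobs: Some(5),                         // tag 18, unchanged
        enable_enhanced_civic_answers: Some(true), // new optional tag-19 field
        ..Default::default()
    };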
@@ -520,6 +524,8 @@ pub mod generate_content_response {
         Blocklist = 3,
         /// Prompt was blocked due to prohibited content.
         ProhibitedContent = 4,
+        /// Candidates blocked due to unsafe image generation content.
+        ImageSafety = 5,
     }
     impl BlockReason {
         /// String value of the enum field names used in the ProtoBuf definition.
@@ -533,6 +539,7 @@ pub mod generate_content_response {
                 Self::Other => "OTHER",
                 Self::Blocklist => "BLOCKLIST",
                 Self::ProhibitedContent => "PROHIBITED_CONTENT",
+                Self::ImageSafety => "IMAGE_SAFETY",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -543,6 +550,7 @@ pub mod generate_content_response {
                 "OTHER" => Some(Self::Other),
                 "BLOCKLIST" => Some(Self::Blocklist),
                 "PROHIBITED_CONTENT" => Some(Self::ProhibitedContent),
+                "IMAGE_SAFETY" => Some(Self::ImageSafety),
                 _ => None,
             }
         }
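The new `BlockReason::ImageSafety` variant is added to both string-mapping tables, so wire decoding and name round-tripping stay in sync. A sketch of inspecting a response's block reason, assuming prost's generated `TryFrom<i32>` conversion for the enum (`raw` would come from `prompt_feedback.block_reason`, an i32 on the wire):

    use gcloud_sdk::google::ai::generativelanguage::v1::generate_content_response::BlockReason;

    // Map a raw wire value to a human-readable label.
    fn describe(raw: i32) -> &'static str {
        match BlockReason::try_from(raw) {
            Ok(BlockReason::ImageSafety) => "blocked for unsafe generated images",
            Ok(other) => other.as_str_name(),
            Err(_) => "unrecognized block reason",
        }
    }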
@@ -646,6 +654,9 @@ pub mod candidate {
         Spii = 9,
         /// The function call generated by the model is invalid.
         MalformedFunctionCall = 10,
+        /// Token generation stopped because generated images contain safety
+        /// violations.
+        ImageSafety = 11,
     }
     impl FinishReason {
         /// String value of the enum field names used in the ProtoBuf definition.
@@ -665,6 +676,7 @@ pub mod candidate {
                 Self::ProhibitedContent => "PROHIBITED_CONTENT",
                 Self::Spii => "SPII",
                 Self::MalformedFunctionCall => "MALFORMED_FUNCTION_CALL",
+                Self::ImageSafety => "IMAGE_SAFETY",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -681,6 +693,7 @@ pub mod candidate {
                 "PROHIBITED_CONTENT" => Some(Self::ProhibitedContent),
                 "SPII" => Some(Self::Spii),
                 "MALFORMED_FUNCTION_CALL" => Some(Self::MalformedFunctionCall),
+                "IMAGE_SAFETY" => Some(Self::ImageSafety),
                 _ => None,
             }
         }
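As with `BlockReason`, the two lookup tables are the only places the string form lives, so a quick round-trip exercises both sides of this change (module path assumed to follow the crate's usual layout):

    use gcloud_sdk::google::ai::generativelanguage::v1::candidate::FinishReason;

    // Both directions use the mappings added in this commit.
    assert_eq!(FinishReason::ImageSafety.as_str_name(), "IMAGE_SAFETY");
    assert_eq!(
        FinishReason::from_str_name("IMAGE_SAFETY"),
        Some(FinishReason::ImageSafety)
    );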
